Started at: 10:02:30
nb-bert-base, 0.001, 128 ({'_name_or_path': '/disk4/folder1/working/checkpoints/huggingface/native_pytorch/step4_8/', 'attention_probs_dropout_prob': 0.1, 'directionality': 'bidi', 'gradient_checkpointing': False, 'hidden_act': 'gelu', 'hidden_dropout_prob': 0.1, 'hidden_size': 768, 'initializer_range': 0.02, 'intermediate_size': 3072, 'layer_norm_eps': 1e-12, 'max_position_embeddings': 512, 'model_type': 'bert', 'num_attention_heads': 12, 'num_hidden_layers': 12, 'pad_token_id': 0, 'pooler_fc_size': 768, 'pooler_num_attention_heads': 12, 'pooler_num_fc_layers': 3, 'pooler_size_per_head': 128, 'pooler_type': 'first_token_transform', 'position_embedding_type': 'absolute', 'type_vocab_size': 2, 'vocab_size': 119547, '_commit_hash': '82b194c0b3ea1fcad65f1eceee04adb26f9f71ac'}, {})
Epoch: 0
Training loss: 0.33461407989263536 - MAE: 0.4395451926176116
Validation loss: 0.17470807801274693 - MAE: 0.316590567311014
Epoch: 1
Training loss: 0.167546104490757 - MAE: 0.3104947695666053
Validation loss: 0.1605402261895292 - MAE: 0.3041267300080164
Epoch: 2
Training loss: 0.15953998982906342 - MAE: 0.30230224392344485
Validation loss: 0.156581671360661 - MAE: 0.3003702843252824
Epoch: 3
Training loss: 0.15586254999041557 - MAE: 0.2976227208408245
Validation loss: 0.15435482824549956 - MAE: 0.29782302866508426
Epoch: 4
Training loss: 0.153594990670681 - MAE: 0.2951023367494838
Validation loss: 0.15327392255558686 - MAE: 0.29758807291884654
Epoch: 5
Training loss: 0.1521633943915367 - MAE: 0.293013468516263
Validation loss: 0.1528188367100323 - MAE: 0.29786525073805276
Epoch: 6
Training loss: 0.15114636063575745 - MAE: 0.29207026473332626
Validation loss: 0.1511023048968876 - MAE: 0.2940484620567765
Epoch: 7
Training loss: 0.14836823791265488 - MAE: 0.289356569209421
Validation loss: 0.15079707608503454 - MAE: 0.2948241208277115
Epoch: 8
Training loss: 0.14847442269325256 - MAE: 0.2894169052100116
Validation loss: 0.15011538915774403 - MAE: 0.2942575668480824
Epoch: 9
Training loss: 0.1463117568194866 - MAE: 0.2868930664850179
Validation loss: 0.14972165007801616 - MAE: 0.29381925573597917
Epoch: 10
Training loss: 0.14613379642367363 - MAE: 0.28686849242541645
Validation loss: 0.14911113547928193 - MAE: 0.2929993253708467
Epoch: 11
Training loss: 0.14440282836556434 - MAE: 0.2852567208945548
Validation loss: 0.14865088901099036 - MAE: 0.29170890920584996
Epoch: 12
Training loss: 0.14355748310685157 - MAE: 0.2853594401786027
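
For context, below is a minimal sketch of the kind of fine-tuning loop that would emit log lines in this format. Only the model name (nb-bert-base), the learning rate (0.001) and the batch size (128) are taken from the log header above; the Hub ID "NbAiLab/nb-bert-base", the single-target regression head, the AdamW optimizer, the MSE objective and the data loaders are assumptions for illustration, not taken from this file.

import torch
from torch import nn
from transformers import AutoModel

device = "cuda" if torch.cuda.is_available() else "cpu"
# Assumed: the public Hub checkpoint; the log actually loads a local path.
encoder = AutoModel.from_pretrained("NbAiLab/nb-bert-base").to(device)
head = nn.Linear(encoder.config.hidden_size, 1).to(device)  # assumed regression head
optimizer = torch.optim.AdamW(
    list(encoder.parameters()) + list(head.parameters()), lr=0.001  # lr from header
)
mse = nn.MSELoss()

def run_epoch(loader, train=True):
    """Run one pass over the data; return mean loss and mean absolute error."""
    encoder.train(train)
    head.train(train)
    total_loss, total_abs_err, n = 0.0, 0.0, 0
    with torch.set_grad_enabled(train):
        for batch in loader:  # batches of 128, per the header
            inputs = {k: v.to(device) for k, v in batch["inputs"].items()}
            targets = batch["targets"].float().to(device)
            pooled = encoder(**inputs).last_hidden_state[:, 0]  # [CLS] embedding
            preds = head(pooled).squeeze(-1)
            loss = mse(preds, targets)
            if train:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            total_loss += loss.item() * targets.size(0)
            total_abs_err += (preds - targets).abs().sum().item()
            n += targets.size(0)
    return total_loss / n, total_abs_err / n

# Hypothetical driver matching the log format (train_loader/val_loader not defined here):
# for epoch in range(13):
#     tr_loss, tr_mae = run_epoch(train_loader, train=True)
#     va_loss, va_mae = run_epoch(val_loader, train=False)
#     print(f"Epoch: {epoch}")
#     print(f"Training loss: {tr_loss} - MAE: {tr_mae}")
#     print(f"Validation loss: {va_loss} - MAE: {va_mae}")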