Started at: 09:30:12 ({'_name_or_path': '/disk4/folder1/working/checkpoints/huggingface/native_pytorch/step4_8/', 'attention_probs_dropout_prob': 0.1, 'directionality': 'bidi', 'gradient_checkpointing': False, 'hidden_act': 'gelu', 'hidden_dropout_prob': 0.1, 'hidden_size': 768, 'initializer_range': 0.02, 'intermediate_size': 3072, 'layer_norm_eps': 1e-12, 'max_position_embeddings': 512, 'model_type': 'bert', 'num_attention_heads': 12, 'num_hidden_layers': 12, 'pad_token_id': 0, 'pooler_fc_size': 768, 'pooler_num_attention_heads': 12, 'pooler_num_fc_layers': 3, 'pooler_size_per_head': 128, 'pooler_type': 'first_token_transform', 'position_embedding_type': 'absolute', 'type_vocab_size': 2, 'vocab_size': 119547, '_commit_hash': '82b194c0b3ea1fcad65f1eceee04adb26f9f71ac'}, {})
Epoch: 0
Training loss: 0.21533221261281715 - MSE: 0.34197339416593503
Validation loss : 0.14451597188599408 - MSE: 0.2960304229995927
Epoch: 1
Training loss: 0.1517588474640721 - MSE: 0.2956301456982305
Validation loss : 0.12380113103426993 - MSE: 0.26862038821082024
Epoch: 2
Training loss: 0.14443182972700971 - MSE: 0.28899192076676716
Validation loss : 0.12112187955062836 - MSE: 0.2663072116915828
Epoch: 3
Training loss: 0.14475514602504277 - MSE: 0.29038970057934027
Validation loss : 0.13478531874716282 - MSE: 0.2850182042234337
Epoch: 4
Training loss: 0.1408511215134671 - MSE: 0.2861137754569417
Validation loss : 0.13267319882288575 - MSE: 0.2831877799249014
Epoch: 5
Training loss: 0.136032929232246 - MSE: 0.279424393321389
Validation loss : 0.11587779759429395 - MSE: 0.2579734090027159
Epoch: 6
Training loss: 0.1351911084432351 - MSE: 0.2779842455851462
Validation loss : 0.11928518384229392 - MSE: 0.25956903308497203
Epoch: 7
Training loss: 0.14176342871628309 - MSE: 0.28619337123151156
Validation loss : 0.11641600204166025 - MSE: 0.2570362210167332
Epoch: 8
Training loss: 0.13039547287319836 - MSE: 0.2738442464898942
Validation loss : 0.11378515849355608 - MSE: 0.2556338147619499
Epoch: 9
Training loss: 0.13758943127958398 - MSE: 0.2815961984005678
Validation loss : 0.11927694408223033 - MSE: 0.2592867935668437
Epoch: 10
Training loss: 0.13600184517471414 - MSE: 0.27845210879496773
Validation loss : 0.12289933965075761 - MSE: 0.2641740314788876
Epoch: 11
Training loss: 0.12924143111235217 - MSE: 0.27266305881012975
Validation loss : 0.11782969336491078 - MSE: 0.258937672893353
Epoch: 12
Training loss: 0.1344190981827284 - MSE: 0.28026616499269114
Validation loss : 0.11472726217471063 - MSE: 0.25845796944440735
Epoch: 13
Training loss: 0.1375312733414926 - MSE: 0.28103011966626457
Validation loss : 0.11630514287389815 - MSE: 0.25740234173235876
Epoch: 14
Training loss: 0.13571442161735736 - MSE: 0.2811562022346792
Validation loss : 0.11639386380556971 - MSE: 0.2606048270604333
Epoch: 15
Training loss: 0.14136280589982084 - MSE: 0.286976132520683
Validation loss : 0.12002589798066765 - MSE: 0.26447004281317277
Epoch: 16
Training loss: 0.13457591416020143 - MSE: 0.2796572462970741
Validation loss : 0.11478170729242265 - MSE: 0.2548193569305113
Epoch: 17
Training loss: 0.1362821259780934 - MSE: 0.28092215080945243
Validation loss : 0.11653975711669773 - MSE: 0.25721588885744495
Epoch: 18
Training loss: 0.1377105382320128 - MSE: 0.28347960023660485
Validation loss : 0.1213059100555256 - MSE: 0.2660068857726401
Epoch: 19
Training loss: 0.13346115291902894 - MSE: 0.2807079509831734
Validation loss : 0.11323059559799731 - MSE: 0.2554361889847314
Epoch: 20
Training loss: 0.1303498934758337 - MSE: 0.27570483001616874
Validation loss : 0.11533307109493762 - MSE: 0.25642082826334445
Epoch: 21
Training loss: 0.1338770524843743 - MSE: 0.2793606339234174
Validation loss : 0.12536148354411125 - MSE: 0.2694582264007295
Epoch: 22
Training loss: 0.13188404574206 - MSE: 0.2766540656590396
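
The dict printed after "Started at:" matches a HuggingFace BertConfig (multilingual vocabulary of 119547 tokens, 12 hidden layers, hidden size 768), and each epoch reports a training and validation loss together with an MSE metric, which suggests a regression-style fine-tuning run. The sketch below shows one way a log in this format could be produced; it assumes PyTorch and the transformers library. The checkpoint path is copied from the log, but the single-output regression head, the AdamW optimizer and learning rate, the placeholder data, and the way the epoch-level MSE is computed are all assumptions, not details recoverable from the log itself.

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from transformers import BertForSequenceClassification

    CHECKPOINT = "/disk4/folder1/working/checkpoints/huggingface/native_pytorch/step4_8/"
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Single-output head => regression scored with MSE (assumption based on the metric in the log).
    model = BertForSequenceClassification.from_pretrained(CHECKPOINT, num_labels=1).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)  # optimizer and lr are assumptions
    loss_fn = torch.nn.MSELoss()

    # Placeholder data; the real run would tokenize its own dataset with the matching tokenizer.
    dummy = TensorDataset(
        torch.randint(0, model.config.vocab_size, (32, 128)),  # input_ids
        torch.ones(32, 128, dtype=torch.long),                 # attention_mask
        torch.rand(32),                                         # regression targets
    )
    train_loader = DataLoader(dummy, batch_size=8, shuffle=True)
    val_loader = DataLoader(dummy, batch_size=8)

    def run_epoch(loader, train):
        model.train(train)
        total_loss, preds, targets = 0.0, [], []
        for input_ids, attention_mask, labels in loader:
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            labels = labels.to(device)
            with torch.set_grad_enabled(train):
                logits = model(input_ids=input_ids, attention_mask=attention_mask).logits.squeeze(-1)
                loss = loss_fn(logits, labels)
            if train:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            total_loss += loss.item()
            preds.append(logits.detach().cpu())
            targets.append(labels.cpu())
        # Epoch-level MSE over all predictions; how the "MSE" column in the log above was
        # actually computed is not visible, so this is only one plausible choice.
        mse = loss_fn(torch.cat(preds), torch.cat(targets)).item()
        return total_loss / len(loader), mse

    for epoch in range(23):  # the log above shows epochs 0-22
        train_loss, train_mse = run_epoch(train_loader, train=True)
        val_loss, val_mse = run_epoch(val_loader, train=False)
        print(f"Epoch: {epoch}")
        print(f"Training loss: {train_loss} - MSE: {train_mse}")
        print(f"Validation loss : {val_loss} - MSE: {val_mse}")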