{ |
|
"best_metric": 0.4225879484500174, |
|
"best_model_checkpoint": "/groups/claytonm/enoriega/kw_pubmed/kw_pubmed_5000_0.00006/checkpoint-585", |
|
"epoch": 4.770047108764301, |
|
"global_step": 735, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 5.961038961038961e-05, |
|
"loss": 4.0597, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 5.922077922077922e-05, |
|
"loss": 3.63, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 5.883116883116883e-05, |
|
"loss": 3.467, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"eval_accuracy": 0.3901079763148729, |
|
"eval_loss": 3.7511544227600098, |
|
"eval_runtime": 16.5637, |
|
"eval_samples_per_second": 603.729, |
|
"eval_steps_per_second": 18.897, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 5.844155844155844e-05, |
|
"loss": 3.3347, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 5.805194805194805e-05, |
|
"loss": 3.2169, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 5.7662337662337664e-05, |
|
"loss": 3.1359, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"eval_accuracy": 0.39041274817136884, |
|
"eval_loss": 3.7756261825561523, |
|
"eval_runtime": 16.5475, |
|
"eval_samples_per_second": 604.322, |
|
"eval_steps_per_second": 18.915, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 5.7272727272727274e-05, |
|
"loss": 3.076, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 5.6883116883116884e-05, |
|
"loss": 2.9729, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 5.6493506493506495e-05, |
|
"loss": 2.931, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"eval_accuracy": 0.39467955416231276, |
|
"eval_loss": 3.798929452896118, |
|
"eval_runtime": 16.648, |
|
"eval_samples_per_second": 600.672, |
|
"eval_steps_per_second": 18.801, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 5.6103896103896105e-05, |
|
"loss": 2.8682, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 5.5714285714285715e-05, |
|
"loss": 2.8255, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 5.5324675324675326e-05, |
|
"loss": 2.7794, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"eval_accuracy": 0.39803204458376873, |
|
"eval_loss": 3.7885875701904297, |
|
"eval_runtime": 16.6293, |
|
"eval_samples_per_second": 601.348, |
|
"eval_steps_per_second": 18.822, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 5.4935064935064936e-05, |
|
"loss": 2.7529, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 5.4545454545454546e-05, |
|
"loss": 2.699, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 5.4155844155844157e-05, |
|
"loss": 2.6464, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"eval_accuracy": 0.4050853361198189, |
|
"eval_loss": 3.7896740436553955, |
|
"eval_runtime": 17.133, |
|
"eval_samples_per_second": 583.668, |
|
"eval_steps_per_second": 18.269, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 5.376623376623377e-05, |
|
"loss": 2.6402, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 5.337662337662338e-05, |
|
"loss": 2.6216, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 5.298701298701299e-05, |
|
"loss": 2.5798, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"eval_accuracy": 0.40316962730755834, |
|
"eval_loss": 3.832270383834839, |
|
"eval_runtime": 17.2068, |
|
"eval_samples_per_second": 581.165, |
|
"eval_steps_per_second": 18.19, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 5.267532467532468e-05, |
|
"loss": 2.5716, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 5.228571428571429e-05, |
|
"loss": 2.5615, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 5.18961038961039e-05, |
|
"loss": 2.5126, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"eval_accuracy": 0.40687042842215254, |
|
"eval_loss": 3.8271889686584473, |
|
"eval_runtime": 17.1827, |
|
"eval_samples_per_second": 581.981, |
|
"eval_steps_per_second": 18.216, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 5.150649350649351e-05, |
|
"loss": 2.4727, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 5.111688311688312e-05, |
|
"loss": 2.5236, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 5.072727272727273e-05, |
|
"loss": 2.4631, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"eval_accuracy": 0.39885928247997215, |
|
"eval_loss": 3.866598129272461, |
|
"eval_runtime": 17.205, |
|
"eval_samples_per_second": 581.228, |
|
"eval_steps_per_second": 18.192, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 5.033766233766233e-05, |
|
"loss": 2.4563, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 4.994805194805195e-05, |
|
"loss": 2.4166, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 4.955844155844156e-05, |
|
"loss": 2.4321, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"eval_accuracy": 0.4076541274817137, |
|
"eval_loss": 3.8538074493408203, |
|
"eval_runtime": 17.5232, |
|
"eval_samples_per_second": 570.673, |
|
"eval_steps_per_second": 17.862, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 4.916883116883117e-05, |
|
"loss": 2.3705, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 4.877922077922078e-05, |
|
"loss": 2.3907, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 4.838961038961039e-05, |
|
"loss": 2.357, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"eval_accuracy": 0.4062173458725183, |
|
"eval_loss": 3.8510184288024902, |
|
"eval_runtime": 17.1923, |
|
"eval_samples_per_second": 581.656, |
|
"eval_steps_per_second": 18.206, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 4.8e-05, |
|
"loss": 2.5669, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 4.761038961038961e-05, |
|
"loss": 2.2904, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 4.722077922077922e-05, |
|
"loss": 2.2463, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"eval_accuracy": 0.4090909090909091, |
|
"eval_loss": 3.870206117630005, |
|
"eval_runtime": 17.2036, |
|
"eval_samples_per_second": 581.273, |
|
"eval_steps_per_second": 18.194, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 4.683116883116883e-05, |
|
"loss": 2.2434, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 4.644155844155844e-05, |
|
"loss": 2.2517, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 4.605194805194805e-05, |
|
"loss": 2.2312, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"eval_accuracy": 0.4103099965168931, |
|
"eval_loss": 3.878140687942505, |
|
"eval_runtime": 17.2005, |
|
"eval_samples_per_second": 581.377, |
|
"eval_steps_per_second": 18.197, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 4.566233766233766e-05, |
|
"loss": 2.2025, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 4.5272727272727274e-05, |
|
"loss": 2.199, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 4.4883116883116884e-05, |
|
"loss": 2.1948, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"eval_accuracy": 0.4127917102055033, |
|
"eval_loss": 3.8671493530273438, |
|
"eval_runtime": 17.2222, |
|
"eval_samples_per_second": 580.645, |
|
"eval_steps_per_second": 18.174, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 4.4493506493506494e-05, |
|
"loss": 2.1707, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 4.4103896103896105e-05, |
|
"loss": 2.1603, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 4.3714285714285715e-05, |
|
"loss": 2.1735, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"eval_accuracy": 0.4157088122605364, |
|
"eval_loss": 3.863778829574585, |
|
"eval_runtime": 17.1906, |
|
"eval_samples_per_second": 581.713, |
|
"eval_steps_per_second": 18.208, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 4.3324675324675325e-05, |
|
"loss": 2.1833, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 4.2935064935064936e-05, |
|
"loss": 2.1442, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 4.2545454545454546e-05, |
|
"loss": 2.1541, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"eval_accuracy": 0.4085249042145594, |
|
"eval_loss": 3.8899924755096436, |
|
"eval_runtime": 17.2073, |
|
"eval_samples_per_second": 581.148, |
|
"eval_steps_per_second": 18.19, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 4.2155844155844156e-05, |
|
"loss": 2.1361, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 4.1766233766233767e-05, |
|
"loss": 2.1254, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 4.137662337662338e-05, |
|
"loss": 2.1117, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"eval_accuracy": 0.414272030651341, |
|
"eval_loss": 3.9167368412017822, |
|
"eval_runtime": 17.1975, |
|
"eval_samples_per_second": 581.48, |
|
"eval_steps_per_second": 18.2, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 4.098701298701299e-05, |
|
"loss": 2.1208, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 4.05974025974026e-05, |
|
"loss": 2.0991, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 4.020779220779221e-05, |
|
"loss": 2.1094, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"eval_accuracy": 0.4169278996865204, |
|
"eval_loss": 3.8918027877807617, |
|
"eval_runtime": 17.2198, |
|
"eval_samples_per_second": 580.727, |
|
"eval_steps_per_second": 18.177, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 3.9818181818181825e-05, |
|
"loss": 2.1038, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 3.942857142857143e-05, |
|
"loss": 2.0806, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 3.9038961038961046e-05, |
|
"loss": 2.0917, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"eval_accuracy": 0.41300940438871475, |
|
"eval_loss": 3.920091152191162, |
|
"eval_runtime": 17.181, |
|
"eval_samples_per_second": 582.037, |
|
"eval_steps_per_second": 18.218, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 3.864935064935065e-05, |
|
"loss": 2.0582, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 3.8259740259740266e-05, |
|
"loss": 2.0541, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 3.787012987012987e-05, |
|
"loss": 2.0453, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"eval_accuracy": 0.41814698711250436, |
|
"eval_loss": 3.8712852001190186, |
|
"eval_runtime": 17.1826, |
|
"eval_samples_per_second": 581.984, |
|
"eval_steps_per_second": 18.216, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 3.748051948051949e-05, |
|
"loss": 2.0351, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 3.709090909090909e-05, |
|
"loss": 2.0527, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 3.670129870129871e-05, |
|
"loss": 2.0247, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"eval_accuracy": 0.41762452107279696, |
|
"eval_loss": 3.8774173259735107, |
|
"eval_runtime": 17.2528, |
|
"eval_samples_per_second": 579.615, |
|
"eval_steps_per_second": 18.142, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 3.631168831168831e-05, |
|
"loss": 2.0146, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 3.592207792207792e-05, |
|
"loss": 2.2296, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 3.553246753246753e-05, |
|
"loss": 1.9703, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"eval_accuracy": 0.4164925113200975, |
|
"eval_loss": 3.891786813735962, |
|
"eval_runtime": 17.1846, |
|
"eval_samples_per_second": 581.915, |
|
"eval_steps_per_second": 18.214, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 3.514285714285714e-05, |
|
"loss": 1.9429, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 3.475324675324675e-05, |
|
"loss": 1.9408, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 3.436363636363636e-05, |
|
"loss": 1.9492, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"eval_accuracy": 0.41610066179031696, |
|
"eval_loss": 3.928813934326172, |
|
"eval_runtime": 17.2139, |
|
"eval_samples_per_second": 580.927, |
|
"eval_steps_per_second": 18.183, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 3.397402597402597e-05, |
|
"loss": 1.9459, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 3.3584415584415584e-05, |
|
"loss": 1.9532, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 3.3194805194805194e-05, |
|
"loss": 1.9351, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"eval_accuracy": 0.4128352490421456, |
|
"eval_loss": 3.947727680206299, |
|
"eval_runtime": 17.2191, |
|
"eval_samples_per_second": 580.75, |
|
"eval_steps_per_second": 18.177, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 3.2805194805194804e-05, |
|
"loss": 1.9483, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 3.2415584415584415e-05, |
|
"loss": 1.9433, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 3.2025974025974025e-05, |
|
"loss": 1.9001, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"eval_accuracy": 0.4186259143155695, |
|
"eval_loss": 3.9299826622009277, |
|
"eval_runtime": 17.1998, |
|
"eval_samples_per_second": 581.401, |
|
"eval_steps_per_second": 18.198, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 3.1636363636363635e-05, |
|
"loss": 1.9127, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 3.1246753246753246e-05, |
|
"loss": 1.9051, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 3.0857142857142856e-05, |
|
"loss": 1.9082, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"eval_accuracy": 0.4161442006269592, |
|
"eval_loss": 3.9622690677642822, |
|
"eval_runtime": 17.2078, |
|
"eval_samples_per_second": 581.133, |
|
"eval_steps_per_second": 18.189, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 3.046753246753247e-05, |
|
"loss": 1.8929, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 3.0077922077922077e-05, |
|
"loss": 1.912, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 2.968831168831169e-05, |
|
"loss": 1.8804, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"eval_accuracy": 0.4174503657262278, |
|
"eval_loss": 3.9447922706604004, |
|
"eval_runtime": 17.219, |
|
"eval_samples_per_second": 580.753, |
|
"eval_steps_per_second": 18.178, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 2.92987012987013e-05, |
|
"loss": 1.8821, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 2.890909090909091e-05, |
|
"loss": 1.8962, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 2.851948051948052e-05, |
|
"loss": 1.8962, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"eval_accuracy": 0.4183646812957158, |
|
"eval_loss": 3.939225673675537, |
|
"eval_runtime": 17.2072, |
|
"eval_samples_per_second": 581.153, |
|
"eval_steps_per_second": 18.19, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 2.812987012987013e-05, |
|
"loss": 1.8903, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 2.774025974025974e-05, |
|
"loss": 1.8881, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 2.735064935064935e-05, |
|
"loss": 1.884, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"eval_accuracy": 0.4125740160222919, |
|
"eval_loss": 3.95487380027771, |
|
"eval_runtime": 17.2137, |
|
"eval_samples_per_second": 580.934, |
|
"eval_steps_per_second": 18.183, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 2.696103896103896e-05, |
|
"loss": 1.8694, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 2.657142857142857e-05, |
|
"loss": 1.872, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 2.618181818181818e-05, |
|
"loss": 1.8689, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"eval_accuracy": 0.4190613026819923, |
|
"eval_loss": 3.909341812133789, |
|
"eval_runtime": 17.2189, |
|
"eval_samples_per_second": 580.758, |
|
"eval_steps_per_second": 18.178, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 2.579220779220779e-05, |
|
"loss": 1.877, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 2.54025974025974e-05, |
|
"loss": 1.8685, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 2.501298701298701e-05, |
|
"loss": 1.8522, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"eval_accuracy": 0.4203239289446186, |
|
"eval_loss": 3.9448978900909424, |
|
"eval_runtime": 17.2358, |
|
"eval_samples_per_second": 580.188, |
|
"eval_steps_per_second": 18.16, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 2.4623376623376625e-05, |
|
"loss": 1.8654, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 2.4233766233766235e-05, |
|
"loss": 1.8605, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 2.3844155844155845e-05, |
|
"loss": 2.037, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"eval_accuracy": 0.41705851619644724, |
|
"eval_loss": 3.981081008911133, |
|
"eval_runtime": 17.206, |
|
"eval_samples_per_second": 581.194, |
|
"eval_steps_per_second": 18.191, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 2.3454545454545456e-05, |
|
"loss": 1.8046, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 2.3064935064935066e-05, |
|
"loss": 1.8011, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 2.2675324675324676e-05, |
|
"loss": 1.8038, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"eval_accuracy": 0.4150992685475444, |
|
"eval_loss": 3.9613595008850098, |
|
"eval_runtime": 17.1973, |
|
"eval_samples_per_second": 581.488, |
|
"eval_steps_per_second": 18.201, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 2.2285714285714287e-05, |
|
"loss": 1.8073, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 2.1896103896103897e-05, |
|
"loss": 1.783, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 2.1506493506493507e-05, |
|
"loss": 1.805, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"eval_accuracy": 0.41819052594914663, |
|
"eval_loss": 3.9464197158813477, |
|
"eval_runtime": 17.2204, |
|
"eval_samples_per_second": 580.705, |
|
"eval_steps_per_second": 18.176, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 2.1116883116883118e-05, |
|
"loss": 1.7972, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 2.0727272727272728e-05, |
|
"loss": 1.7782, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 2.033766233766234e-05, |
|
"loss": 1.8135, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"eval_accuracy": 0.41592650644374785, |
|
"eval_loss": 3.9678966999053955, |
|
"eval_runtime": 17.1877, |
|
"eval_samples_per_second": 581.811, |
|
"eval_steps_per_second": 18.211, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 1.994805194805195e-05, |
|
"loss": 1.7835, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 1.955844155844156e-05, |
|
"loss": 1.782, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 1.916883116883117e-05, |
|
"loss": 1.7927, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"eval_accuracy": 0.419932079414838, |
|
"eval_loss": 3.9554903507232666, |
|
"eval_runtime": 17.1692, |
|
"eval_samples_per_second": 582.437, |
|
"eval_steps_per_second": 18.23, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 1.877922077922078e-05, |
|
"loss": 1.7979, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 1.838961038961039e-05, |
|
"loss": 1.7894, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 1.8e-05, |
|
"loss": 1.7723, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"eval_accuracy": 0.41936607453848834, |
|
"eval_loss": 3.970630645751953, |
|
"eval_runtime": 17.1943, |
|
"eval_samples_per_second": 581.588, |
|
"eval_steps_per_second": 18.204, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 1.761038961038961e-05, |
|
"loss": 1.797, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 1.722077922077922e-05, |
|
"loss": 1.7836, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 1.683116883116883e-05, |
|
"loss": 1.7893, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"eval_accuracy": 0.4174068268895855, |
|
"eval_loss": 3.984471321105957, |
|
"eval_runtime": 17.1471, |
|
"eval_samples_per_second": 583.19, |
|
"eval_steps_per_second": 18.254, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 1.6441558441558442e-05, |
|
"loss": 1.7891, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 1.6051948051948052e-05, |
|
"loss": 1.7564, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 1.5662337662337662e-05, |
|
"loss": 1.7725, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"eval_accuracy": 0.41762452107279696, |
|
"eval_loss": 3.9673519134521484, |
|
"eval_runtime": 16.8716, |
|
"eval_samples_per_second": 592.713, |
|
"eval_steps_per_second": 18.552, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 1.5272727272727273e-05, |
|
"loss": 1.7631, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 1.4883116883116883e-05, |
|
"loss": 1.7441, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 1.4493506493506493e-05, |
|
"loss": 1.7721, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"eval_accuracy": 0.4225879484500174, |
|
"eval_loss": 3.9382729530334473, |
|
"eval_runtime": 17.13, |
|
"eval_samples_per_second": 583.773, |
|
"eval_steps_per_second": 18.272, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 1.4103896103896104e-05, |
|
"loss": 1.7317, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 1.3714285714285714e-05, |
|
"loss": 1.7738, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 1.3324675324675324e-05, |
|
"loss": 1.7533, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"eval_accuracy": 0.4196708463949843, |
|
"eval_loss": 3.96612811088562, |
|
"eval_runtime": 17.1432, |
|
"eval_samples_per_second": 583.32, |
|
"eval_steps_per_second": 18.258, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 1.2935064935064935e-05, |
|
"loss": 1.7658, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 1.2545454545454545e-05, |
|
"loss": 1.7635, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 1.2155844155844157e-05, |
|
"loss": 1.7437, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"eval_accuracy": 0.41910484151863464, |
|
"eval_loss": 3.972932815551758, |
|
"eval_runtime": 17.2356, |
|
"eval_samples_per_second": 580.195, |
|
"eval_steps_per_second": 18.16, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 1.1766233766233768e-05, |
|
"loss": 1.9054, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 1.1376623376623378e-05, |
|
"loss": 1.7217, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 1.0987012987012988e-05, |
|
"loss": 1.7418, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"eval_accuracy": 0.4188000696621386, |
|
"eval_loss": 4.000043869018555, |
|
"eval_runtime": 17.2412, |
|
"eval_samples_per_second": 580.005, |
|
"eval_steps_per_second": 18.154, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 1.0597402597402599e-05, |
|
"loss": 1.6958, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 1.0207792207792209e-05, |
|
"loss": 1.7156, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 9.81818181818182e-06, |
|
"loss": 1.7237, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"eval_accuracy": 0.42106408916753746, |
|
"eval_loss": 3.971540927886963, |
|
"eval_runtime": 17.1943, |
|
"eval_samples_per_second": 581.589, |
|
"eval_steps_per_second": 18.204, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 9.428571428571428e-06, |
|
"loss": 1.7111, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 9.038961038961038e-06, |
|
"loss": 1.7228, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 8.649350649350649e-06, |
|
"loss": 1.7029, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"eval_accuracy": 0.41919191919191917, |
|
"eval_loss": 4.000442981719971, |
|
"eval_runtime": 17.2363, |
|
"eval_samples_per_second": 580.172, |
|
"eval_steps_per_second": 18.159, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 8.259740259740259e-06, |
|
"loss": 1.7336, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 7.87012987012987e-06, |
|
"loss": 1.7146, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 7.48051948051948e-06, |
|
"loss": 1.7096, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"eval_accuracy": 0.4215430163706026, |
|
"eval_loss": 3.9922502040863037, |
|
"eval_runtime": 17.1842, |
|
"eval_samples_per_second": 581.93, |
|
"eval_steps_per_second": 18.214, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 7.090909090909091e-06, |
|
"loss": 1.7183, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 6.701298701298701e-06, |
|
"loss": 1.7323, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 6.311688311688311e-06, |
|
"loss": 1.7087, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"eval_accuracy": 0.42254440961337514, |
|
"eval_loss": 3.9751763343811035, |
|
"eval_runtime": 17.2048, |
|
"eval_samples_per_second": 581.233, |
|
"eval_steps_per_second": 18.193, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 5.9220779220779226e-06, |
|
"loss": 1.7267, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 5.532467532467533e-06, |
|
"loss": 1.6924, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 5.142857142857143e-06, |
|
"loss": 1.7101, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"eval_accuracy": 0.42080285614768376, |
|
"eval_loss": 3.9924566745758057, |
|
"eval_runtime": 17.2206, |
|
"eval_samples_per_second": 580.699, |
|
"eval_steps_per_second": 18.176, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 4.7532467532467536e-06, |
|
"loss": 1.7133, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 4.363636363636364e-06, |
|
"loss": 1.7042, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 3.974025974025973e-06, |
|
"loss": 1.7061, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"eval_accuracy": 0.4183211424590735, |
|
"eval_loss": 3.9925851821899414, |
|
"eval_runtime": 17.1622, |
|
"eval_samples_per_second": 582.675, |
|
"eval_steps_per_second": 18.238, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 3.5844155844155846e-06, |
|
"loss": 1.7194, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 3.194805194805195e-06, |
|
"loss": 1.7283, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 2.8051948051948052e-06, |
|
"loss": 1.7227, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"eval_accuracy": 0.41671020550330895, |
|
"eval_loss": 4.021413326263428, |
|
"eval_runtime": 17.2026, |
|
"eval_samples_per_second": 581.308, |
|
"eval_steps_per_second": 18.195, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"step": 735, |
|
"total_flos": 4.8027460072854234e+17, |
|
"train_loss": 2.085782461101506, |
|
"train_runtime": 24097.4857, |
|
"train_samples_per_second": 256.517, |
|
"train_steps_per_second": 0.032 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"eval_accuracy": 0.4171891327063741, |
|
"eval_loss": 3.979358434677124, |
|
"eval_runtime": 17.2044, |
|
"eval_samples_per_second": 581.246, |
|
"eval_steps_per_second": 18.193, |
|
"step": 735 |
|
} |
|
], |
|
"max_steps": 770, |
|
"num_train_epochs": 5, |
|
"total_flos": 4.8027460072854234e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |