IA / checkpoint-70215 / trainer_state.json
{
"best_metric": 0.9058317542076111,
"best_model_checkpoint": "norBARA/IA/checkpoint-70215",
"epoch": 3.0,
"global_step": 70215,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 4.973296304208503e-05,
"loss": 4.0843,
"step": 500
},
{
"epoch": 0.04,
"learning_rate": 4.946592608417005e-05,
"loss": 3.5105,
"step": 1000
},
{
"epoch": 0.06,
"learning_rate": 4.9198889126255076e-05,
"loss": 3.2948,
"step": 1500
},
{
"epoch": 0.09,
"learning_rate": 4.89318521683401e-05,
"loss": 3.1137,
"step": 2000
},
{
"epoch": 0.11,
"learning_rate": 4.8664815210425125e-05,
"loss": 2.9855,
"step": 2500
},
{
"epoch": 0.13,
"learning_rate": 4.839777825251015e-05,
"loss": 2.9086,
"step": 3000
},
{
"epoch": 0.15,
"learning_rate": 4.8130741294595174e-05,
"loss": 2.8061,
"step": 3500
},
{
"epoch": 0.17,
"learning_rate": 4.78637043366802e-05,
"loss": 2.7305,
"step": 4000
},
{
"epoch": 0.19,
"learning_rate": 4.7596667378765224e-05,
"loss": 2.6679,
"step": 4500
},
{
"epoch": 0.21,
"learning_rate": 4.732963042085025e-05,
"loss": 2.616,
"step": 5000
},
{
"epoch": 0.23,
"learning_rate": 4.706259346293527e-05,
"loss": 2.5824,
"step": 5500
},
{
"epoch": 0.26,
"learning_rate": 4.67955565050203e-05,
"loss": 2.5671,
"step": 6000
},
{
"epoch": 0.28,
"learning_rate": 4.652851954710532e-05,
"loss": 2.4818,
"step": 6500
},
{
"epoch": 0.3,
"learning_rate": 4.6261482589190346e-05,
"loss": 2.4255,
"step": 7000
},
{
"epoch": 0.32,
"learning_rate": 4.599444563127537e-05,
"loss": 2.4096,
"step": 7500
},
{
"epoch": 0.34,
"learning_rate": 4.5727408673360395e-05,
"loss": 2.3959,
"step": 8000
},
{
"epoch": 0.36,
"learning_rate": 4.546037171544542e-05,
"loss": 2.3399,
"step": 8500
},
{
"epoch": 0.38,
"learning_rate": 4.5193334757530445e-05,
"loss": 2.3288,
"step": 9000
},
{
"epoch": 0.41,
"learning_rate": 4.492629779961547e-05,
"loss": 2.2737,
"step": 9500
},
{
"epoch": 0.43,
"learning_rate": 4.4659260841700494e-05,
"loss": 2.2641,
"step": 10000
},
{
"epoch": 0.45,
"learning_rate": 4.439222388378552e-05,
"loss": 2.2556,
"step": 10500
},
{
"epoch": 0.47,
"learning_rate": 4.412518692587054e-05,
"loss": 2.2102,
"step": 11000
},
{
"epoch": 0.49,
"learning_rate": 4.385814996795557e-05,
"loss": 2.157,
"step": 11500
},
{
"epoch": 0.51,
"learning_rate": 4.359111301004059e-05,
"loss": 2.1435,
"step": 12000
},
{
"epoch": 0.53,
"learning_rate": 4.3324076052125617e-05,
"loss": 2.1125,
"step": 12500
},
{
"epoch": 0.56,
"learning_rate": 4.305703909421064e-05,
"loss": 2.0885,
"step": 13000
},
{
"epoch": 0.58,
"learning_rate": 4.2790002136295666e-05,
"loss": 2.0974,
"step": 13500
},
{
"epoch": 0.6,
"learning_rate": 4.252296517838069e-05,
"loss": 2.0545,
"step": 14000
},
{
"epoch": 0.62,
"learning_rate": 4.2255928220465715e-05,
"loss": 2.0123,
"step": 14500
},
{
"epoch": 0.64,
"learning_rate": 4.198889126255074e-05,
"loss": 2.0299,
"step": 15000
},
{
"epoch": 0.66,
"learning_rate": 4.1721854304635764e-05,
"loss": 1.963,
"step": 15500
},
{
"epoch": 0.68,
"learning_rate": 4.145481734672079e-05,
"loss": 2.0054,
"step": 16000
},
{
"epoch": 0.7,
"learning_rate": 4.118778038880581e-05,
"loss": 2.0102,
"step": 16500
},
{
"epoch": 0.73,
"learning_rate": 4.092074343089084e-05,
"loss": 1.9359,
"step": 17000
},
{
"epoch": 0.75,
"learning_rate": 4.065370647297586e-05,
"loss": 1.9243,
"step": 17500
},
{
"epoch": 0.77,
"learning_rate": 4.038666951506089e-05,
"loss": 1.9235,
"step": 18000
},
{
"epoch": 0.79,
"learning_rate": 4.011963255714591e-05,
"loss": 1.8522,
"step": 18500
},
{
"epoch": 0.81,
"learning_rate": 3.9852595599230936e-05,
"loss": 1.8656,
"step": 19000
},
{
"epoch": 0.83,
"learning_rate": 3.958555864131596e-05,
"loss": 1.8714,
"step": 19500
},
{
"epoch": 0.85,
"learning_rate": 3.9318521683400985e-05,
"loss": 1.8386,
"step": 20000
},
{
"epoch": 0.88,
"learning_rate": 3.905148472548601e-05,
"loss": 1.8411,
"step": 20500
},
{
"epoch": 0.9,
"learning_rate": 3.8784447767571034e-05,
"loss": 1.8251,
"step": 21000
},
{
"epoch": 0.92,
"learning_rate": 3.851741080965606e-05,
"loss": 1.8082,
"step": 21500
},
{
"epoch": 0.94,
"learning_rate": 3.825037385174108e-05,
"loss": 1.7752,
"step": 22000
},
{
"epoch": 0.96,
"learning_rate": 3.798333689382611e-05,
"loss": 1.7992,
"step": 22500
},
{
"epoch": 0.98,
"learning_rate": 3.771629993591113e-05,
"loss": 1.7578,
"step": 23000
},
{
"epoch": 1.0,
"eval_gen_len": 10.30942106387524,
"eval_loss": 1.3722968101501465,
"eval_rouge1": 13.5957,
"eval_rouge2": 7.0766,
"eval_rougeL": 13.3911,
"eval_rougeLsum": 13.3971,
"eval_runtime": 1615.8665,
"eval_samples_per_second": 28.969,
"eval_steps_per_second": 3.622,
"step": 23405
},
{
"epoch": 1.0,
"learning_rate": 3.744926297799616e-05,
"loss": 1.7477,
"step": 23500
},
{
"epoch": 1.03,
"learning_rate": 3.718222602008118e-05,
"loss": 1.7099,
"step": 24000
},
{
"epoch": 1.05,
"learning_rate": 3.6915189062166206e-05,
"loss": 1.7081,
"step": 24500
},
{
"epoch": 1.07,
"learning_rate": 3.664815210425123e-05,
"loss": 1.7086,
"step": 25000
},
{
"epoch": 1.09,
"learning_rate": 3.6381115146336255e-05,
"loss": 1.6554,
"step": 25500
},
{
"epoch": 1.11,
"learning_rate": 3.611407818842128e-05,
"loss": 1.6971,
"step": 26000
},
{
"epoch": 1.13,
"learning_rate": 3.5847041230506304e-05,
"loss": 1.6616,
"step": 26500
},
{
"epoch": 1.15,
"learning_rate": 3.558000427259133e-05,
"loss": 1.6644,
"step": 27000
},
{
"epoch": 1.17,
"learning_rate": 3.5312967314676354e-05,
"loss": 1.6301,
"step": 27500
},
{
"epoch": 1.2,
"learning_rate": 3.504593035676138e-05,
"loss": 1.638,
"step": 28000
},
{
"epoch": 1.22,
"learning_rate": 3.47788933988464e-05,
"loss": 1.6436,
"step": 28500
},
{
"epoch": 1.24,
"learning_rate": 3.451185644093143e-05,
"loss": 1.6214,
"step": 29000
},
{
"epoch": 1.26,
"learning_rate": 3.424481948301645e-05,
"loss": 1.5978,
"step": 29500
},
{
"epoch": 1.28,
"learning_rate": 3.3977782525101476e-05,
"loss": 1.5972,
"step": 30000
},
{
"epoch": 1.3,
"learning_rate": 3.37107455671865e-05,
"loss": 1.6007,
"step": 30500
},
{
"epoch": 1.32,
"learning_rate": 3.3443708609271526e-05,
"loss": 1.5834,
"step": 31000
},
{
"epoch": 1.35,
"learning_rate": 3.317667165135655e-05,
"loss": 1.5915,
"step": 31500
},
{
"epoch": 1.37,
"learning_rate": 3.290963469344157e-05,
"loss": 1.5525,
"step": 32000
},
{
"epoch": 1.39,
"learning_rate": 3.26425977355266e-05,
"loss": 1.546,
"step": 32500
},
{
"epoch": 1.41,
"learning_rate": 3.2375560777611624e-05,
"loss": 1.5355,
"step": 33000
},
{
"epoch": 1.43,
"learning_rate": 3.210852381969665e-05,
"loss": 1.5136,
"step": 33500
},
{
"epoch": 1.45,
"learning_rate": 3.184148686178167e-05,
"loss": 1.5422,
"step": 34000
},
{
"epoch": 1.47,
"learning_rate": 3.15744499038667e-05,
"loss": 1.5073,
"step": 34500
},
{
"epoch": 1.5,
"learning_rate": 3.130741294595172e-05,
"loss": 1.4878,
"step": 35000
},
{
"epoch": 1.52,
"learning_rate": 3.104037598803675e-05,
"loss": 1.5078,
"step": 35500
},
{
"epoch": 1.54,
"learning_rate": 3.077333903012177e-05,
"loss": 1.5254,
"step": 36000
},
{
"epoch": 1.56,
"learning_rate": 3.0506302072206792e-05,
"loss": 1.4822,
"step": 36500
},
{
"epoch": 1.58,
"learning_rate": 3.023926511429182e-05,
"loss": 1.4692,
"step": 37000
},
{
"epoch": 1.6,
"learning_rate": 2.997222815637684e-05,
"loss": 1.4699,
"step": 37500
},
{
"epoch": 1.62,
"learning_rate": 2.970519119846187e-05,
"loss": 1.5252,
"step": 38000
},
{
"epoch": 1.64,
"learning_rate": 2.9438154240546894e-05,
"loss": 1.476,
"step": 38500
},
{
"epoch": 1.67,
"learning_rate": 2.9171117282631915e-05,
"loss": 1.4817,
"step": 39000
},
{
"epoch": 1.69,
"learning_rate": 2.8904080324716943e-05,
"loss": 1.4274,
"step": 39500
},
{
"epoch": 1.71,
"learning_rate": 2.8637043366801968e-05,
"loss": 1.4528,
"step": 40000
},
{
"epoch": 1.73,
"learning_rate": 2.837000640888699e-05,
"loss": 1.4375,
"step": 40500
},
{
"epoch": 1.75,
"learning_rate": 2.8102969450972017e-05,
"loss": 1.4036,
"step": 41000
},
{
"epoch": 1.77,
"learning_rate": 2.783593249305704e-05,
"loss": 1.38,
"step": 41500
},
{
"epoch": 1.79,
"learning_rate": 2.7568895535142063e-05,
"loss": 1.4489,
"step": 42000
},
{
"epoch": 1.82,
"learning_rate": 2.730185857722709e-05,
"loss": 1.3985,
"step": 42500
},
{
"epoch": 1.84,
"learning_rate": 2.7034821619312112e-05,
"loss": 1.4323,
"step": 43000
},
{
"epoch": 1.86,
"learning_rate": 2.676778466139714e-05,
"loss": 1.4051,
"step": 43500
},
{
"epoch": 1.88,
"learning_rate": 2.6500747703482164e-05,
"loss": 1.3908,
"step": 44000
},
{
"epoch": 1.9,
"learning_rate": 2.6233710745567186e-05,
"loss": 1.3892,
"step": 44500
},
{
"epoch": 1.92,
"learning_rate": 2.5966673787652213e-05,
"loss": 1.3936,
"step": 45000
},
{
"epoch": 1.94,
"learning_rate": 2.5699636829737238e-05,
"loss": 1.4004,
"step": 45500
},
{
"epoch": 1.97,
"learning_rate": 2.543259987182226e-05,
"loss": 1.3838,
"step": 46000
},
{
"epoch": 1.99,
"learning_rate": 2.5165562913907287e-05,
"loss": 1.3756,
"step": 46500
},
{
"epoch": 2.0,
"eval_gen_len": 10.362208929715873,
"eval_loss": 1.0369791984558105,
"eval_rouge1": 21.6305,
"eval_rouge2": 14.527,
"eval_rougeL": 21.4438,
"eval_rougeLsum": 21.4536,
"eval_runtime": 1661.185,
"eval_samples_per_second": 28.179,
"eval_steps_per_second": 3.523,
"step": 46810
},
{
"epoch": 2.01,
"learning_rate": 2.4898525955992312e-05,
"loss": 1.3742,
"step": 47000
},
{
"epoch": 2.03,
"learning_rate": 2.4631488998077333e-05,
"loss": 1.3323,
"step": 47500
},
{
"epoch": 2.05,
"learning_rate": 2.4364452040162358e-05,
"loss": 1.3481,
"step": 48000
},
{
"epoch": 2.07,
"learning_rate": 2.4097415082247385e-05,
"loss": 1.3529,
"step": 48500
},
{
"epoch": 2.09,
"learning_rate": 2.383037812433241e-05,
"loss": 1.3385,
"step": 49000
},
{
"epoch": 2.11,
"learning_rate": 2.356334116641743e-05,
"loss": 1.3378,
"step": 49500
},
{
"epoch": 2.14,
"learning_rate": 2.329630420850246e-05,
"loss": 1.3214,
"step": 50000
},
{
"epoch": 2.16,
"learning_rate": 2.3029267250587484e-05,
"loss": 1.3214,
"step": 50500
},
{
"epoch": 2.18,
"learning_rate": 2.2762230292672508e-05,
"loss": 1.3032,
"step": 51000
},
{
"epoch": 2.2,
"learning_rate": 2.249519333475753e-05,
"loss": 1.3152,
"step": 51500
},
{
"epoch": 2.22,
"learning_rate": 2.2228156376842557e-05,
"loss": 1.3169,
"step": 52000
},
{
"epoch": 2.24,
"learning_rate": 2.1961119418927582e-05,
"loss": 1.3132,
"step": 52500
},
{
"epoch": 2.26,
"learning_rate": 2.1694082461012603e-05,
"loss": 1.3171,
"step": 53000
},
{
"epoch": 2.29,
"learning_rate": 2.1427045503097628e-05,
"loss": 1.2979,
"step": 53500
},
{
"epoch": 2.31,
"learning_rate": 2.1160008545182656e-05,
"loss": 1.3013,
"step": 54000
},
{
"epoch": 2.33,
"learning_rate": 2.089297158726768e-05,
"loss": 1.3067,
"step": 54500
},
{
"epoch": 2.35,
"learning_rate": 2.06259346293527e-05,
"loss": 1.3199,
"step": 55000
},
{
"epoch": 2.37,
"learning_rate": 2.0358897671437726e-05,
"loss": 1.2876,
"step": 55500
},
{
"epoch": 2.39,
"learning_rate": 2.0091860713522754e-05,
"loss": 1.2876,
"step": 56000
},
{
"epoch": 2.41,
"learning_rate": 1.982482375560778e-05,
"loss": 1.2703,
"step": 56500
},
{
"epoch": 2.44,
"learning_rate": 1.95577867976928e-05,
"loss": 1.2641,
"step": 57000
},
{
"epoch": 2.46,
"learning_rate": 1.9290749839777828e-05,
"loss": 1.2976,
"step": 57500
},
{
"epoch": 2.48,
"learning_rate": 1.9023712881862852e-05,
"loss": 1.2577,
"step": 58000
},
{
"epoch": 2.5,
"learning_rate": 1.8756675923947873e-05,
"loss": 1.2925,
"step": 58500
},
{
"epoch": 2.52,
"learning_rate": 1.8489638966032898e-05,
"loss": 1.2686,
"step": 59000
},
{
"epoch": 2.54,
"learning_rate": 1.8222602008117926e-05,
"loss": 1.273,
"step": 59500
},
{
"epoch": 2.56,
"learning_rate": 1.795556505020295e-05,
"loss": 1.2604,
"step": 60000
},
{
"epoch": 2.58,
"learning_rate": 1.768852809228797e-05,
"loss": 1.265,
"step": 60500
},
{
"epoch": 2.61,
"learning_rate": 1.7421491134372996e-05,
"loss": 1.2653,
"step": 61000
},
{
"epoch": 2.63,
"learning_rate": 1.7154454176458024e-05,
"loss": 1.2241,
"step": 61500
},
{
"epoch": 2.65,
"learning_rate": 1.688741721854305e-05,
"loss": 1.233,
"step": 62000
},
{
"epoch": 2.67,
"learning_rate": 1.662038026062807e-05,
"loss": 1.2328,
"step": 62500
},
{
"epoch": 2.69,
"learning_rate": 1.6353343302713095e-05,
"loss": 1.2456,
"step": 63000
},
{
"epoch": 2.71,
"learning_rate": 1.6086306344798122e-05,
"loss": 1.2269,
"step": 63500
},
{
"epoch": 2.73,
"learning_rate": 1.5819269386883147e-05,
"loss": 1.2386,
"step": 64000
},
{
"epoch": 2.76,
"learning_rate": 1.5552232428968168e-05,
"loss": 1.2462,
"step": 64500
},
{
"epoch": 2.78,
"learning_rate": 1.5285195471053196e-05,
"loss": 1.2436,
"step": 65000
},
{
"epoch": 2.8,
"learning_rate": 1.5018158513138219e-05,
"loss": 1.2263,
"step": 65500
},
{
"epoch": 2.82,
"learning_rate": 1.4751121555223244e-05,
"loss": 1.2415,
"step": 66000
},
{
"epoch": 2.84,
"learning_rate": 1.4484084597308267e-05,
"loss": 1.256,
"step": 66500
},
{
"epoch": 2.86,
"learning_rate": 1.4217047639393294e-05,
"loss": 1.2272,
"step": 67000
},
{
"epoch": 2.88,
"learning_rate": 1.3950010681478317e-05,
"loss": 1.2169,
"step": 67500
},
{
"epoch": 2.91,
"learning_rate": 1.3682973723563342e-05,
"loss": 1.2149,
"step": 68000
},
{
"epoch": 2.93,
"learning_rate": 1.3415936765648365e-05,
"loss": 1.2085,
"step": 68500
},
{
"epoch": 2.95,
"learning_rate": 1.3148899807733393e-05,
"loss": 1.2077,
"step": 69000
},
{
"epoch": 2.97,
"learning_rate": 1.2881862849818416e-05,
"loss": 1.2123,
"step": 69500
},
{
"epoch": 2.99,
"learning_rate": 1.261482589190344e-05,
"loss": 1.2093,
"step": 70000
},
{
"epoch": 3.0,
"eval_gen_len": 9.954048280281992,
"eval_loss": 0.9058317542076111,
"eval_rouge1": 26.5379,
"eval_rouge2": 19.0017,
"eval_rougeL": 26.3586,
"eval_rougeLsum": 26.3719,
"eval_runtime": 1636.659,
"eval_samples_per_second": 28.601,
"eval_steps_per_second": 3.576,
"step": 70215
}
],
"max_steps": 93620,
"num_train_epochs": 4,
"total_flos": 9789226340843520.0,
"trial_name": null,
"trial_params": null
}
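
The JSON above is the `trainer_state.json` that the Hugging Face Transformers `Trainer` writes into each checkpoint directory: `log_history` holds the periodic training-loss entries plus one evaluation entry per epoch, while `best_metric` / `best_model_checkpoint` record that the epoch-3 evaluation (step 70215, eval_loss 0.9058) is the best so far. Below is a minimal sketch, not part of the checkpoint itself, for reading these fields; the file path is illustrative, and the 5e-5 linear-decay check is an inference from the logged learning rates rather than a documented setting.

import json
import math

# Load the trainer state (adjust the path to your local copy of the checkpoint).
with open("checkpoint-70215/trainer_state.json") as f:
    state = json.load(f)

# Split log_history into training-loss entries and per-epoch evaluation entries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(eval_loss={state['best_metric']:.4f})")

for e in eval_logs:
    print(f"epoch {e['epoch']:.0f}: eval_loss={e['eval_loss']:.4f}  "
          f"rougeL={e['eval_rougeL']:.2f}  gen_len={e['eval_gen_len']:.1f}")

# The logged learning rates are consistent with a linear decay from an assumed
# initial value of 5e-5 over max_steps, with no warmup:
#     lr(step) ~= 5e-5 * (1 - step / max_steps)
max_steps = state["max_steps"]
for e in train_logs[:3]:
    predicted = 5e-5 * (1 - e["step"] / max_steps)
    assert math.isclose(predicted, e["learning_rate"], rel_tol=1e-6)

Note that max_steps (93620) equals four epochs of 23405 optimizer steps each, matching num_train_epochs = 4, so this state reflects training stopped (or snapshotted) at the end of epoch 3 of 4.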