{
  "best_metric": 0.9052917957305908,
  "best_model_checkpoint": "./tst-translation-output/checkpoint-12000",
  "epoch": 12.126224156692057,
  "eval_steps": 3000,
  "global_step": 39000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "learning_rate": 9.712509712509713e-06,
      "loss": 1.9995,
      "step": 500
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.9425019425019425e-05,
      "loss": 1.4173,
      "step": 1000
    },
    {
      "epoch": 0.23,
      "learning_rate": 2.913752913752914e-05,
      "loss": 1.3137,
      "step": 1500
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.885003885003885e-05,
      "loss": 1.2381,
      "step": 2000
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.856254856254856e-05,
      "loss": 1.2023,
      "step": 2500
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.991638730343166e-05,
      "loss": 1.1751,
      "step": 3000
    },
    {
      "epoch": 0.47,
      "eval_bleu": 7.6766,
      "eval_gen_len": 17.4644,
      "eval_loss": 1.1288801431655884,
      "eval_runtime": 851.3289,
      "eval_samples_per_second": 15.112,
      "eval_steps_per_second": 0.946,
      "step": 3000
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.981825033562843e-05,
      "loss": 1.1267,
      "step": 3500
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.972011336782521e-05,
      "loss": 1.1013,
      "step": 4000
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.962197640002198e-05,
      "loss": 1.0821,
      "step": 4500
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.952383943221876e-05,
      "loss": 1.0599,
      "step": 5000
    },
    {
      "epoch": 0.85,
      "learning_rate": 4.9425702464415536e-05,
      "loss": 1.0336,
      "step": 5500
    },
    {
      "epoch": 0.93,
      "learning_rate": 4.9327565496612316e-05,
      "loss": 1.0268,
      "step": 6000
    },
    {
      "epoch": 0.93,
      "eval_bleu": 9.2277,
      "eval_gen_len": 17.7668,
      "eval_loss": 0.9894970655441284,
      "eval_runtime": 891.3112,
      "eval_samples_per_second": 14.434,
      "eval_steps_per_second": 0.903,
      "step": 6000
    },
    {
      "epoch": 1.01,
      "learning_rate": 4.922942852880909e-05,
      "loss": 0.9631,
      "step": 6500
    },
    {
      "epoch": 1.09,
      "learning_rate": 4.913129156100586e-05,
      "loss": 0.8021,
      "step": 7000
    },
    {
      "epoch": 1.17,
      "learning_rate": 4.903315459320265e-05,
      "loss": 0.8189,
      "step": 7500
    },
    {
      "epoch": 1.24,
      "learning_rate": 4.893501762539942e-05,
      "loss": 0.8034,
      "step": 8000
    },
    {
      "epoch": 1.32,
      "learning_rate": 4.8836880657596196e-05,
      "loss": 0.8156,
      "step": 8500
    },
    {
      "epoch": 1.4,
      "learning_rate": 4.873874368979297e-05,
      "loss": 0.8075,
      "step": 9000
    },
    {
      "epoch": 1.4,
      "eval_bleu": 9.1197,
      "eval_gen_len": 17.6811,
      "eval_loss": 0.945686399936676,
      "eval_runtime": 839.7466,
      "eval_samples_per_second": 15.32,
      "eval_steps_per_second": 0.959,
      "step": 9000
    },
    {
      "epoch": 1.48,
      "learning_rate": 4.864060672198975e-05,
      "loss": 0.81,
      "step": 9500
    },
    {
      "epoch": 1.55,
      "learning_rate": 4.854246975418653e-05,
      "loss": 0.8101,
      "step": 10000
    },
    {
      "epoch": 1.63,
      "learning_rate": 4.84443327863833e-05,
      "loss": 0.7964,
      "step": 10500
    },
    {
      "epoch": 1.71,
      "learning_rate": 4.8346195818580075e-05,
      "loss": 0.806,
      "step": 11000
    },
    {
      "epoch": 1.79,
      "learning_rate": 4.8248058850776855e-05,
      "loss": 0.8042,
      "step": 11500
    },
    {
      "epoch": 1.87,
      "learning_rate": 4.814992188297363e-05,
      "loss": 0.8082,
      "step": 12000
    },
    {
      "epoch": 1.87,
      "eval_bleu": 8.4837,
      "eval_gen_len": 17.4826,
      "eval_loss": 0.9052917957305908,
      "eval_runtime": 837.7614,
      "eval_samples_per_second": 15.356,
      "eval_steps_per_second": 0.961,
      "step": 12000
    },
    {
      "epoch": 1.94,
      "learning_rate": 4.805178491517041e-05,
      "loss": 0.8032,
      "step": 12500
    },
    {
      "epoch": 2.02,
      "learning_rate": 4.795364794736719e-05,
      "loss": 0.7252,
      "step": 13000
    },
    {
      "epoch": 2.1,
      "learning_rate": 4.785551097956396e-05,
      "loss": 0.5682,
      "step": 13500
    },
    {
      "epoch": 2.18,
      "learning_rate": 4.7757374011760734e-05,
      "loss": 0.5741,
      "step": 14000
    },
    {
      "epoch": 2.25,
      "learning_rate": 4.7659237043957514e-05,
      "loss": 0.5818,
      "step": 14500
    },
    {
      "epoch": 2.33,
      "learning_rate": 4.7561100076154294e-05,
      "loss": 0.5841,
      "step": 15000
    },
    {
      "epoch": 2.33,
      "eval_bleu": 9.8887,
      "eval_gen_len": 17.5166,
      "eval_loss": 0.9302938580513,
      "eval_runtime": 835.8268,
      "eval_samples_per_second": 15.392,
      "eval_steps_per_second": 0.963,
      "step": 15000
    },
    {
      "epoch": 2.41,
      "learning_rate": 4.746296310835107e-05,
      "loss": 0.5893,
      "step": 15500
    },
    {
      "epoch": 2.49,
      "learning_rate": 4.736482614054784e-05,
      "loss": 0.5978,
      "step": 16000
    },
    {
      "epoch": 2.56,
      "learning_rate": 4.726668917274461e-05,
      "loss": 0.6057,
      "step": 16500
    },
    {
      "epoch": 2.64,
      "learning_rate": 4.716855220494139e-05,
      "loss": 0.6048,
      "step": 17000
    },
    {
      "epoch": 2.72,
      "learning_rate": 4.707041523713817e-05,
      "loss": 0.6113,
      "step": 17500
    },
    {
      "epoch": 2.8,
      "learning_rate": 4.6972278269334946e-05,
      "loss": 0.6142,
      "step": 18000
    },
    {
      "epoch": 2.8,
      "eval_bleu": 9.547,
      "eval_gen_len": 17.426,
      "eval_loss": 0.9142343401908875,
      "eval_runtime": 836.5663,
      "eval_samples_per_second": 15.378,
      "eval_steps_per_second": 0.962,
      "step": 18000
    },
    {
      "epoch": 2.88,
      "learning_rate": 4.6874141301531726e-05,
      "loss": 0.6165,
      "step": 18500
    },
    {
      "epoch": 2.95,
      "learning_rate": 4.67760043337285e-05,
      "loss": 0.6087,
      "step": 19000
    },
    {
      "epoch": 3.03,
      "learning_rate": 4.667786736592528e-05,
      "loss": 0.5237,
      "step": 19500
    },
    {
      "epoch": 3.11,
      "learning_rate": 4.657973039812205e-05,
      "loss": 0.3981,
      "step": 20000
    },
    {
      "epoch": 3.19,
      "learning_rate": 4.648159343031883e-05,
      "loss": 0.4018,
      "step": 20500
    },
    {
      "epoch": 3.26,
      "learning_rate": 4.6383456462515606e-05,
      "loss": 0.4119,
      "step": 21000
    },
    {
      "epoch": 3.26,
      "eval_bleu": 9.5055,
      "eval_gen_len": 17.3378,
      "eval_loss": 0.9879267811775208,
      "eval_runtime": 820.5652,
      "eval_samples_per_second": 15.678,
      "eval_steps_per_second": 0.981,
      "step": 21000
    },
    {
      "epoch": 6.69,
      "learning_rate": 3.947146767740577e-05,
      "loss": 0.3417,
      "step": 21500
    },
    {
      "epoch": 6.84,
      "learning_rate": 3.920825436934091e-05,
      "loss": 0.3358,
      "step": 22000
    },
    {
      "epoch": 7.0,
      "learning_rate": 3.8945041061276064e-05,
      "loss": 0.3331,
      "step": 22500
    },
    {
      "epoch": 7.15,
      "learning_rate": 3.86818277532112e-05,
      "loss": 0.2743,
      "step": 23000
    },
    {
      "epoch": 7.31,
      "learning_rate": 3.841861444514635e-05,
      "loss": 0.2776,
      "step": 23500
    },
    {
      "epoch": 7.46,
      "learning_rate": 3.8155401137081495e-05,
      "loss": 0.2837,
      "step": 24000
    },
    {
      "epoch": 7.46,
      "eval_bleu": 11.0549,
      "eval_gen_len": 17.2982,
      "eval_loss": 1.0062528848648071,
      "eval_runtime": 964.1851,
      "eval_samples_per_second": 13.343,
      "eval_steps_per_second": 1.669,
      "step": 24000
    },
    {
      "epoch": 7.62,
      "learning_rate": 3.789218782901663e-05,
      "loss": 0.2896,
      "step": 24500
    },
    {
      "epoch": 7.77,
      "learning_rate": 3.762897452095178e-05,
      "loss": 0.2927,
      "step": 25000
    },
    {
      "epoch": 7.93,
      "learning_rate": 3.7365761212886926e-05,
      "loss": 0.298,
      "step": 25500
    },
    {
      "epoch": 8.08,
      "learning_rate": 3.710254790482207e-05,
      "loss": 0.2277,
      "step": 26000
    },
    {
      "epoch": 8.24,
      "learning_rate": 3.683933459675721e-05,
      "loss": 0.1723,
      "step": 26500
    },
    {
      "epoch": 8.4,
      "learning_rate": 3.657612128869236e-05,
      "loss": 0.1792,
      "step": 27000
    },
    {
      "epoch": 8.4,
      "eval_bleu": 8.9031,
      "eval_gen_len": 17.2801,
      "eval_loss": 1.085568904876709,
      "eval_runtime": 954.4683,
      "eval_samples_per_second": 13.479,
      "eval_steps_per_second": 1.686,
      "step": 27000
    },
    {
      "epoch": 8.55,
      "learning_rate": 3.63129079806275e-05,
      "loss": 0.1867,
      "step": 27500
    },
    {
      "epoch": 8.71,
      "learning_rate": 3.6049694672562644e-05,
      "loss": 0.1921,
      "step": 28000
    },
    {
      "epoch": 8.86,
      "learning_rate": 3.5786481364497795e-05,
      "loss": 0.1974,
      "step": 28500
    },
    {
      "epoch": 9.02,
      "learning_rate": 3.552326805643293e-05,
      "loss": 0.1903,
      "step": 29000
    },
    {
      "epoch": 9.17,
      "learning_rate": 3.5260054748368075e-05,
      "loss": 0.1132,
      "step": 29500
    },
    {
      "epoch": 9.33,
      "learning_rate": 3.4996841440303226e-05,
      "loss": 0.1204,
      "step": 30000
    },
    {
      "epoch": 9.33,
      "eval_bleu": 11.3498,
      "eval_gen_len": 17.2986,
      "eval_loss": 1.1643482446670532,
      "eval_runtime": 964.1632,
      "eval_samples_per_second": 13.343,
      "eval_steps_per_second": 1.669,
      "step": 30000
    },
    {
      "epoch": 9.48,
      "learning_rate": 3.473362813223837e-05,
      "loss": 0.125,
      "step": 30500
    },
    {
      "epoch": 9.64,
      "learning_rate": 3.447041482417351e-05,
      "loss": 0.1299,
      "step": 31000
    },
    {
      "epoch": 9.79,
      "learning_rate": 3.4207201516108657e-05,
      "loss": 0.1327,
      "step": 31500
    },
    {
      "epoch": 9.95,
      "learning_rate": 3.39439882080438e-05,
      "loss": 0.1355,
      "step": 32000
    },
    {
      "epoch": 10.11,
      "learning_rate": 3.3680774899978944e-05,
      "loss": 0.0975,
      "step": 32500
    },
    {
      "epoch": 10.26,
      "learning_rate": 3.341756159191409e-05,
      "loss": 0.0826,
      "step": 33000
    },
    {
      "epoch": 10.26,
      "eval_bleu": 10.796,
      "eval_gen_len": 17.3627,
      "eval_loss": 1.231860876083374,
      "eval_runtime": 963.3695,
      "eval_samples_per_second": 13.354,
      "eval_steps_per_second": 1.67,
      "step": 33000
    },
    {
      "epoch": 10.42,
      "learning_rate": 3.315434828384924e-05,
      "loss": 0.0874,
      "step": 33500
    },
    {
      "epoch": 10.57,
      "learning_rate": 3.2891134975784375e-05,
      "loss": 0.0894,
      "step": 34000
    },
    {
      "epoch": 10.73,
      "learning_rate": 3.262792166771952e-05,
      "loss": 0.0952,
      "step": 34500
    },
    {
      "epoch": 10.88,
      "learning_rate": 3.236470835965467e-05,
      "loss": 0.097,
      "step": 35000
    },
    {
      "epoch": 11.04,
      "learning_rate": 3.2101495051589806e-05,
      "loss": 0.0888,
      "step": 35500
    },
    {
      "epoch": 11.19,
      "learning_rate": 3.1838281743524956e-05,
      "loss": 0.0617,
      "step": 36000
    },
    {
      "epoch": 11.19,
      "eval_bleu": 10.6211,
      "eval_gen_len": 17.3748,
      "eval_loss": 1.2784796953201294,
      "eval_runtime": 963.6124,
      "eval_samples_per_second": 13.351,
      "eval_steps_per_second": 1.67,
      "step": 36000
    },
    {
      "epoch": 11.35,
      "learning_rate": 3.15750684354601e-05,
      "loss": 0.0635,
      "step": 36500
    },
    {
      "epoch": 11.5,
      "learning_rate": 3.131185512739524e-05,
      "loss": 0.0682,
      "step": 37000
    },
    {
      "epoch": 11.66,
      "learning_rate": 3.104864181933039e-05,
      "loss": 0.0703,
      "step": 37500
    },
    {
      "epoch": 11.82,
      "learning_rate": 3.078542851126553e-05,
      "loss": 0.0717,
      "step": 38000
    },
    {
      "epoch": 11.97,
      "learning_rate": 3.0522215203200675e-05,
      "loss": 0.0744,
      "step": 38500
    },
    {
      "epoch": 12.13,
      "learning_rate": 3.025900189513582e-05,
      "loss": 0.0523,
      "step": 39000
    },
    {
      "epoch": 12.13,
      "eval_bleu": 9.8848,
      "eval_gen_len": 17.3358,
      "eval_loss": 1.3216667175292969,
      "eval_runtime": 962.5757,
      "eval_samples_per_second": 13.365,
      "eval_steps_per_second": 1.672,
      "step": 39000
    }
  ],
  "logging_steps": 500,
  "max_steps": 96480,
  "num_train_epochs": 30,
  "save_steps": 3000,
  "total_flos": 1.976422348357632e+18,
  "trial_name": null,
  "trial_params": null
}