{
"best_metric": 0.9049463272094727,
"best_model_checkpoint": "./tst-translation-output2/checkpoint-12000",
"epoch": 1.8217701533323214,
"eval_steps": 1500,
"global_step": 12000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"learning_rate": 5e-05,
"loss": 1.7623,
"step": 500
},
{
"epoch": 0.15,
"learning_rate": 4.98913256102067e-05,
"loss": 1.3653,
"step": 1000
},
{
"epoch": 0.23,
"learning_rate": 4.97826512204134e-05,
"loss": 1.2499,
"step": 1500
},
{
"epoch": 0.23,
"eval_bleu": 6.1112,
"eval_gen_len": 18.0495,
"eval_loss": 1.1805654764175415,
"eval_runtime": 916.5943,
"eval_samples_per_second": 14.371,
"eval_steps_per_second": 0.899,
"step": 1500
},
{
"epoch": 0.3,
"learning_rate": 4.96739768306201e-05,
"loss": 1.2038,
"step": 2000
},
{
"epoch": 0.38,
"learning_rate": 4.95653024408268e-05,
"loss": 1.1357,
"step": 2500
},
{
"epoch": 0.46,
"learning_rate": 4.945662805103349e-05,
"loss": 1.1007,
"step": 3000
},
{
"epoch": 0.46,
"eval_bleu": 7.4845,
"eval_gen_len": 17.6068,
"eval_loss": 1.0686322450637817,
"eval_runtime": 870.5195,
"eval_samples_per_second": 15.131,
"eval_steps_per_second": 0.947,
"step": 3000
},
{
"epoch": 0.53,
"learning_rate": 4.9347953661240194e-05,
"loss": 1.0769,
"step": 3500
},
{
"epoch": 0.61,
"learning_rate": 4.923927927144689e-05,
"loss": 1.0708,
"step": 4000
},
{
"epoch": 0.68,
"learning_rate": 4.9130604881653594e-05,
"loss": 1.0334,
"step": 4500
},
{
"epoch": 0.68,
"eval_bleu": 9.0076,
"eval_gen_len": 17.6214,
"eval_loss": 1.0013340711593628,
"eval_runtime": 888.8463,
"eval_samples_per_second": 14.819,
"eval_steps_per_second": 0.927,
"step": 4500
},
{
"epoch": 0.76,
"learning_rate": 4.902193049186029e-05,
"loss": 1.0175,
"step": 5000
},
{
"epoch": 0.83,
"learning_rate": 4.891325610206699e-05,
"loss": 1.0074,
"step": 5500
},
{
"epoch": 0.91,
"learning_rate": 4.880458171227369e-05,
"loss": 0.992,
"step": 6000
},
{
"epoch": 0.91,
"eval_bleu": 8.6786,
"eval_gen_len": 17.868,
"eval_loss": 0.9599339365959167,
"eval_runtime": 933.1213,
"eval_samples_per_second": 14.116,
"eval_steps_per_second": 0.883,
"step": 6000
},
{
"epoch": 0.99,
"learning_rate": 4.8695907322480386e-05,
"loss": 0.9887,
"step": 6500
},
{
"epoch": 1.06,
"learning_rate": 4.858723293268709e-05,
"loss": 0.7834,
"step": 7000
},
{
"epoch": 1.14,
"learning_rate": 4.847855854289378e-05,
"loss": 0.7881,
"step": 7500
},
{
"epoch": 1.14,
"eval_bleu": 9.2343,
"eval_gen_len": 17.2061,
"eval_loss": 0.964439868927002,
"eval_runtime": 844.707,
"eval_samples_per_second": 15.594,
"eval_steps_per_second": 0.975,
"step": 7500
},
{
"epoch": 1.21,
"learning_rate": 4.836988415310048e-05,
"loss": 0.7667,
"step": 8000
},
{
"epoch": 1.29,
"learning_rate": 4.826120976330718e-05,
"loss": 0.7632,
"step": 8500
},
{
"epoch": 1.37,
"learning_rate": 4.815253537351388e-05,
"loss": 0.7675,
"step": 9000
},
{
"epoch": 1.37,
"eval_bleu": 10.0578,
"eval_gen_len": 17.6006,
"eval_loss": 0.942681610584259,
"eval_runtime": 1046.0952,
"eval_samples_per_second": 12.592,
"eval_steps_per_second": 0.788,
"step": 9000
},
{
"epoch": 1.44,
"learning_rate": 4.804386098372058e-05,
"loss": 0.774,
"step": 9500
},
{
"epoch": 1.52,
"learning_rate": 4.793518659392728e-05,
"loss": 0.7731,
"step": 10000
},
{
"epoch": 1.59,
"learning_rate": 4.782651220413398e-05,
"loss": 0.7665,
"step": 10500
},
{
"epoch": 1.59,
"eval_bleu": 10.436,
"eval_gen_len": 17.2095,
"eval_loss": 0.9237579703330994,
"eval_runtime": 854.5281,
"eval_samples_per_second": 15.414,
"eval_steps_per_second": 0.964,
"step": 10500
},
{
"epoch": 1.67,
"learning_rate": 4.7717837814340674e-05,
"loss": 0.761,
"step": 11000
},
{
"epoch": 1.75,
"learning_rate": 4.760916342454738e-05,
"loss": 0.7653,
"step": 11500
},
{
"epoch": 1.82,
"learning_rate": 4.750048903475407e-05,
"loss": 0.7707,
"step": 12000
},
{
"epoch": 1.82,
"eval_bleu": 10.5971,
"eval_gen_len": 17.2971,
"eval_loss": 0.9049463272094727,
"eval_runtime": 851.7484,
"eval_samples_per_second": 15.465,
"eval_steps_per_second": 0.967,
"step": 12000
}
],
"logging_steps": 500,
"max_steps": 230545,
"num_train_epochs": 35,
"save_steps": 1500,
"total_flos": 4.16088915443712e+17,
"trial_name": null,
"trial_params": null
}
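The trainer state above is plain JSON, so it can be read back directly with the standard library. Below is a minimal sketch, not part of the original upload, assuming the file is saved locally as `trainer_state.json` and that matplotlib is installed: it splits `log_history` into training entries (which carry `loss`) and evaluation entries (which carry `eval_bleu`/`eval_loss`), then plots the loss curves and the eval BLEU trend, annotating the best checkpoint recorded in `best_metric`/`best_model_checkpoint`.

```python
# Minimal inspection sketch (assumptions: local file name "trainer_state.json",
# matplotlib available). Not part of the original repository.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]

# Training log entries have a "loss" key; evaluation entries have "eval_*" keys.
train = [(e["step"], e["loss"]) for e in history if "loss" in e]
evals = [(e["step"], e["eval_bleu"], e["eval_loss"]) for e in history if "eval_bleu" in e]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))

# Left panel: training loss vs. evaluation loss over steps.
ax1.plot(*zip(*train), label="train loss")
ax1.plot([s for s, _, l in evals], [l for _, _, l in evals], label="eval loss")
ax1.set_xlabel("step")
ax1.set_ylabel("loss")
ax1.legend()

# Right panel: eval BLEU over steps.
ax2.plot([s for s, b, _ in evals], [b for _, b, _ in evals], marker="o")
ax2.set_xlabel("step")
ax2.set_ylabel("eval BLEU")

fig.suptitle(
    f"best eval_loss={state['best_metric']:.4f} at {state['best_model_checkpoint']}"
)
fig.tight_layout()
plt.show()
```

For the run shown here, this would plot the eval loss falling from about 1.18 at step 1500 to 0.905 at step 12000 while eval BLEU rises from roughly 6.1 to 10.6, matching the `best_model_checkpoint` at `./tst-translation-output2/checkpoint-12000`.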