roberta-base-stsb / trainer_state.json
{
  "best_metric": 0.907904999413384,
  "best_model_checkpoint": "./fine-tune/roberta-base/stsb/checkpoint-3600",
  "epoch": 10.0,
  "global_step": 3600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_combined_score": 0.8800131509784977,
      "eval_loss": 0.6201746463775635,
      "eval_pearson": 0.8786977959496777,
      "eval_runtime": 2.5693,
      "eval_samples_per_second": 583.815,
      "eval_spearmanr": 0.8813285060073178,
      "eval_steps_per_second": 73.171,
      "step": 360
    },
    {
      "epoch": 1.39,
      "learning_rate": 1.832151300236407e-05,
      "loss": 1.6425,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_combined_score": 0.8999728441010757,
      "eval_loss": 0.4863864779472351,
      "eval_pearson": 0.9007776859136989,
      "eval_runtime": 2.7619,
      "eval_samples_per_second": 543.11,
      "eval_spearmanr": 0.8991680022884524,
      "eval_steps_per_second": 68.07,
      "step": 720
    },
    {
      "epoch": 2.78,
      "learning_rate": 1.536643026004728e-05,
      "loss": 0.3629,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_combined_score": 0.9029742067980153,
      "eval_loss": 0.4201485216617584,
      "eval_pearson": 0.9043101427011008,
      "eval_runtime": 2.9861,
      "eval_samples_per_second": 502.319,
      "eval_spearmanr": 0.90163827089493,
      "eval_steps_per_second": 62.957,
      "step": 1080
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.9027351005024904,
      "eval_loss": 0.4686241149902344,
      "eval_pearson": 0.9051726321017076,
      "eval_runtime": 2.7866,
      "eval_samples_per_second": 538.296,
      "eval_spearmanr": 0.9002975689032732,
      "eval_steps_per_second": 67.466,
      "step": 1440
    },
    {
      "epoch": 4.17,
      "learning_rate": 1.2411347517730496e-05,
      "loss": 0.2212,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.9046302878448043,
      "eval_loss": 0.4621739387512207,
      "eval_pearson": 0.9061488304695196,
      "eval_runtime": 2.6495,
      "eval_samples_per_second": 566.149,
      "eval_spearmanr": 0.903111745220089,
      "eval_steps_per_second": 70.957,
      "step": 1800
    },
    {
      "epoch": 5.56,
      "learning_rate": 9.456264775413712e-06,
      "loss": 0.1556,
      "step": 2000
    },
    {
      "epoch": 6.0,
      "eval_combined_score": 0.9075265152567864,
      "eval_loss": 0.39520207047462463,
      "eval_pearson": 0.9085559733517121,
      "eval_runtime": 2.6007,
      "eval_samples_per_second": 576.77,
      "eval_spearmanr": 0.9064970571618607,
      "eval_steps_per_second": 72.288,
      "step": 2160
    },
    {
      "epoch": 6.94,
      "learning_rate": 6.501182033096928e-06,
      "loss": 0.1162,
      "step": 2500
    },
    {
      "epoch": 7.0,
      "eval_combined_score": 0.9075203795014006,
      "eval_loss": 0.4270859360694885,
      "eval_pearson": 0.90805524065019,
      "eval_runtime": 2.7856,
      "eval_samples_per_second": 538.486,
      "eval_spearmanr": 0.9069855183526113,
      "eval_steps_per_second": 67.49,
      "step": 2520
    },
    {
      "epoch": 8.0,
      "eval_combined_score": 0.9084577383816617,
      "eval_loss": 0.4169144630432129,
      "eval_pearson": 0.9093801502025906,
      "eval_runtime": 2.9236,
      "eval_samples_per_second": 513.074,
      "eval_spearmanr": 0.9075353265607328,
      "eval_steps_per_second": 64.305,
      "step": 2880
    },
    {
      "epoch": 8.33,
      "learning_rate": 3.5460992907801423e-06,
      "loss": 0.0887,
      "step": 3000
    },
    {
      "epoch": 9.0,
      "eval_combined_score": 0.908268388971433,
      "eval_loss": 0.4382895529270172,
      "eval_pearson": 0.9091264407921222,
      "eval_runtime": 2.8034,
      "eval_samples_per_second": 535.07,
      "eval_spearmanr": 0.9074103371507438,
      "eval_steps_per_second": 67.062,
      "step": 3240
    },
    {
      "epoch": 9.72,
      "learning_rate": 5.91016548463357e-07,
      "loss": 0.0717,
      "step": 3500
    },
    {
      "epoch": 10.0,
      "eval_combined_score": 0.9089913549074231,
      "eval_loss": 0.41549935936927795,
      "eval_pearson": 0.9100777104014621,
      "eval_runtime": 2.5991,
      "eval_samples_per_second": 577.126,
      "eval_spearmanr": 0.907904999413384,
      "eval_steps_per_second": 72.333,
      "step": 3600
    },
    {
      "epoch": 10.0,
      "step": 3600,
      "total_flos": 3781529690027520.0,
      "train_loss": 0.37126386721928917,
      "train_runtime": 497.076,
      "train_samples_per_second": 115.656,
      "train_steps_per_second": 7.242
    }
  ],
  "max_steps": 3600,
  "num_train_epochs": 10,
  "total_flos": 3781529690027520.0,
  "trial_name": null,
  "trial_params": null
}
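
For reference, a minimal sketch of how this trainer state can be read back with Python's standard json module to list the per-epoch evaluation metrics logged above. The local path `trainer_state.json` is an assumption; point it at wherever the file is stored.

```python
import json

# Load the trainer state written by the Hugging Face Trainer at the end of training.
# NOTE: the local path is an assumption; adjust it to wherever this file lives.
with open("trainer_state.json") as f:
    state = json.load(f)

print("best metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# log_history mixes training-loss entries (loss / learning_rate) with evaluation
# entries; the evaluation entries are the ones carrying eval_* keys.
for entry in state["log_history"]:
    if "eval_pearson" in entry:
        print(
            f"epoch {entry['epoch']:>4}: "
            f"pearson={entry['eval_pearson']:.4f}  "
            f"spearman={entry['eval_spearmanr']:.4f}  "
            f"eval_loss={entry['eval_loss']:.4f}"
        )
```

In this particular run, best_metric equals the epoch-10 eval_spearmanr (≈0.9079) and best_model_checkpoint points at checkpoint-3600, so the final evaluation step is also the best one under the tracked metric.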