roberta-large-wanli / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "global_step": 16080,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16,
      "learning_rate": 5.1813471502590676e-06,
      "loss": 0.9606,
      "step": 500
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.976844194508766e-06,
      "loss": 0.7024,
      "step": 1000
    },
    {
      "epoch": 0.47,
      "learning_rate": 9.646046973205426e-06,
      "loss": 0.6643,
      "step": 1500
    },
    {
      "epoch": 0.62,
      "learning_rate": 9.315249751902085e-06,
      "loss": 0.6306,
      "step": 2000
    },
    {
      "epoch": 0.78,
      "learning_rate": 8.984452530598744e-06,
      "loss": 0.6088,
      "step": 2500
    },
    {
      "epoch": 0.93,
      "learning_rate": 8.653655309295402e-06,
      "loss": 0.6011,
      "step": 3000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7731024026870728,
      "eval_loss": 0.5318970084190369,
      "eval_runtime": 73.9957,
      "eval_samples_per_second": 132.643,
      "eval_steps_per_second": 4.149,
      "step": 3216
    },
    {
      "epoch": 1.09,
      "learning_rate": 8.322858087992062e-06,
      "loss": 0.5493,
      "step": 3500
    },
    {
      "epoch": 1.24,
      "learning_rate": 7.99206086668872e-06,
      "loss": 0.5099,
      "step": 4000
    },
    {
      "epoch": 1.4,
      "learning_rate": 7.66126364538538e-06,
      "loss": 0.5124,
      "step": 4500
    },
    {
      "epoch": 1.55,
      "learning_rate": 7.330466424082037e-06,
      "loss": 0.5131,
      "step": 5000
    },
    {
      "epoch": 1.71,
      "learning_rate": 6.999669202778697e-06,
      "loss": 0.5117,
      "step": 5500
    },
    {
      "epoch": 1.87,
      "learning_rate": 6.668871981475356e-06,
      "loss": 0.5138,
      "step": 6000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.764034628868103,
      "eval_loss": 0.6075196862220764,
      "eval_runtime": 74.2,
      "eval_samples_per_second": 132.278,
      "eval_steps_per_second": 4.137,
      "step": 6432
    },
    {
      "epoch": 2.02,
      "learning_rate": 6.338074760172015e-06,
      "loss": 0.4949,
      "step": 6500
    },
    {
      "epoch": 2.18,
      "learning_rate": 6.007277538868674e-06,
      "loss": 0.3903,
      "step": 7000
    },
    {
      "epoch": 2.33,
      "learning_rate": 5.676480317565333e-06,
      "loss": 0.3965,
      "step": 7500
    },
    {
      "epoch": 2.49,
      "learning_rate": 5.345683096261992e-06,
      "loss": 0.3876,
      "step": 8000
    },
    {
      "epoch": 2.64,
      "learning_rate": 5.01488587495865e-06,
      "loss": 0.3981,
      "step": 8500
    },
    {
      "epoch": 2.8,
      "learning_rate": 4.6840886536553096e-06,
      "loss": 0.3883,
      "step": 9000
    },
    {
      "epoch": 2.95,
      "learning_rate": 4.353291432351969e-06,
      "loss": 0.3833,
      "step": 9500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8112073540687561,
      "eval_loss": 0.4962076246738434,
      "eval_runtime": 74.1434,
      "eval_samples_per_second": 132.379,
      "eval_steps_per_second": 4.141,
      "step": 9648
    },
    {
      "epoch": 3.11,
      "learning_rate": 4.022494211048628e-06,
      "loss": 0.3235,
      "step": 10000
    },
    {
      "epoch": 3.26,
      "learning_rate": 3.6916969897452866e-06,
      "loss": 0.2933,
      "step": 10500
    },
    {
      "epoch": 3.42,
      "learning_rate": 3.3608997684419452e-06,
      "loss": 0.2844,
      "step": 11000
    },
    {
      "epoch": 3.58,
      "learning_rate": 3.0301025471386043e-06,
      "loss": 0.2992,
      "step": 11500
    },
    {
      "epoch": 3.73,
      "learning_rate": 2.699305325835263e-06,
      "loss": 0.2941,
      "step": 12000
    },
    {
      "epoch": 3.89,
      "learning_rate": 2.3685081045319223e-06,
      "loss": 0.2856,
      "step": 12500
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7993887066841125,
      "eval_loss": 0.6498498320579529,
      "eval_runtime": 74.0745,
      "eval_samples_per_second": 132.502,
      "eval_steps_per_second": 4.144,
      "step": 12864
    },
    {
      "epoch": 4.04,
      "learning_rate": 2.037710883228581e-06,
      "loss": 0.2637,
      "step": 13000
    },
    {
      "epoch": 4.2,
      "learning_rate": 1.70691366192524e-06,
      "loss": 0.225,
      "step": 13500
    },
    {
      "epoch": 4.35,
      "learning_rate": 1.3761164406218988e-06,
      "loss": 0.2185,
      "step": 14000
    },
    {
      "epoch": 4.51,
      "learning_rate": 1.0453192193185578e-06,
      "loss": 0.2189,
      "step": 14500
    },
    {
      "epoch": 4.66,
      "learning_rate": 7.145219980152167e-07,
      "loss": 0.2196,
      "step": 15000
    },
    {
      "epoch": 4.82,
      "learning_rate": 3.837247767118756e-07,
      "loss": 0.2201,
      "step": 15500
    },
    {
      "epoch": 4.98,
      "learning_rate": 5.2927555408534575e-08,
      "loss": 0.2194,
      "step": 16000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8041772842407227,
      "eval_loss": 0.6977987885475159,
      "eval_runtime": 74.188,
      "eval_samples_per_second": 132.299,
      "eval_steps_per_second": 4.138,
      "step": 16080
    },
    {
      "epoch": 5.0,
      "step": 16080,
      "total_flos": 1.198526028503616e+17,
      "train_loss": 0.4203688863498061,
      "train_runtime": 12593.4384,
      "train_samples_per_second": 40.849,
      "train_steps_per_second": 1.277
    }
  ],
  "max_steps": 16080,
  "num_train_epochs": 5,
  "total_flos": 1.198526028503616e+17,
  "trial_name": null,
  "trial_params": null
}
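
For reference, a minimal Python sketch of how one might read this file and summarize what the Trainer logged: the entries in "log_history" that contain "eval_accuracy" are the end-of-epoch evaluations, the entries with "learning_rate" are the running training logs (every 500 steps), and the final entry holds the aggregate training stats. The local filename and the printed summary are illustrative assumptions, not part of the repository.

import json

# Assumes the JSON above has been saved locally as trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# End-of-epoch evaluation records vs. periodic training-loss records.
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]
train_logs = [e for e in state["log_history"] if "learning_rate" in e]

for e in eval_logs:
    print(f"epoch {e['epoch']:.0f}: eval_accuracy={e['eval_accuracy']:.4f}, "
          f"eval_loss={e['eval_loss']:.4f} (step {e['step']})")

# The last log entry carries the overall training summary.
final = state["log_history"][-1]
print("train_loss:", final["train_loss"], "| runtime (s):", final["train_runtime"])

Run against the file above, this would show the best eval_accuracy of about 0.811 at the end of epoch 3, with eval_loss rising again in epochs 4 and 5.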