{
"best_metric": 0.3189895749092102,
"best_model_checkpoint": "my_awesome_model_truncaded_split_36k\\checkpoint-1837",
"epoch": 4.0,
"eval_steps": 500,
"global_step": 7348,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2721829069134458,
"grad_norm": 4.77004337310791,
"learning_rate": 9.319542732716386e-06,
"loss": 0.5661,
"step": 500
},
{
"epoch": 0.5443658138268916,
"grad_norm": 13.415882110595703,
"learning_rate": 8.639085465432772e-06,
"loss": 0.457,
"step": 1000
},
{
"epoch": 0.8165487207403375,
"grad_norm": 13.942790985107422,
"learning_rate": 7.958628198149157e-06,
"loss": 0.3841,
"step": 1500
},
{
"epoch": 1.0,
"eval_accuracy": 0.8633455832312509,
"eval_loss": 0.3189895749092102,
"eval_runtime": 2115.733,
"eval_samples_per_second": 3.473,
"eval_steps_per_second": 0.217,
"step": 1837
},
{
"epoch": 1.0887316276537833,
"grad_norm": 21.668664932250977,
"learning_rate": 7.2781709308655426e-06,
"loss": 0.343,
"step": 2000
},
{
"epoch": 1.360914534567229,
"grad_norm": 14.608024597167969,
"learning_rate": 6.597713663581927e-06,
"loss": 0.3067,
"step": 2500
},
{
"epoch": 1.633097441480675,
"grad_norm": 11.27497673034668,
"learning_rate": 5.917256396298313e-06,
"loss": 0.2714,
"step": 3000
},
{
"epoch": 1.905280348394121,
"grad_norm": 8.441925048828125,
"learning_rate": 5.236799129014698e-06,
"loss": 0.2698,
"step": 3500
},
{
"epoch": 2.0,
"eval_accuracy": 0.8595345038791343,
"eval_loss": 0.3298398554325104,
"eval_runtime": 2168.9042,
"eval_samples_per_second": 3.387,
"eval_steps_per_second": 0.212,
"step": 3674
},
{
"epoch": 2.1774632553075666,
"grad_norm": 35.317108154296875,
"learning_rate": 4.5563418617310835e-06,
"loss": 0.2298,
"step": 4000
},
{
"epoch": 2.4496461622210126,
"grad_norm": 9.94012451171875,
"learning_rate": 3.875884594447469e-06,
"loss": 0.2087,
"step": 4500
},
{
"epoch": 2.721829069134458,
"grad_norm": 24.299028396606445,
"learning_rate": 3.1954273271638544e-06,
"loss": 0.2119,
"step": 5000
},
{
"epoch": 2.9940119760479043,
"grad_norm": 5.556156158447266,
"learning_rate": 2.5149700598802396e-06,
"loss": 0.2042,
"step": 5500
},
{
"epoch": 3.0,
"eval_accuracy": 0.881312100176943,
"eval_loss": 0.3292213976383209,
"eval_runtime": 2182.1168,
"eval_samples_per_second": 3.367,
"eval_steps_per_second": 0.211,
"step": 5511
},
{
"epoch": 3.26619488296135,
"grad_norm": 30.425508499145508,
"learning_rate": 1.834512792596625e-06,
"loss": 0.1642,
"step": 6000
},
{
"epoch": 3.538377789874796,
"grad_norm": 10.257048606872559,
"learning_rate": 1.1540555253130105e-06,
"loss": 0.1731,
"step": 6500
},
{
"epoch": 3.810560696788242,
"grad_norm": 37.602664947509766,
"learning_rate": 4.735982580293958e-07,
"loss": 0.1641,
"step": 7000
},
{
"epoch": 4.0,
"eval_accuracy": 0.8960119776779638,
"eval_loss": 0.34636014699935913,
"eval_runtime": 2178.6484,
"eval_samples_per_second": 3.372,
"eval_steps_per_second": 0.211,
"step": 7348
}
],
"logging_steps": 500,
"max_steps": 7348,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.907596283903792e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}