t5_summarizer / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 200,
"global_step": 280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.35714285714285715,
"grad_norm": 33.699462890625,
"learning_rate": 2.0000000000000003e-06,
"loss": 12.5831,
"step": 10
},
{
"epoch": 0.7142857142857143,
"grad_norm": 32.89402770996094,
"learning_rate": 4.000000000000001e-06,
"loss": 12.2228,
"step": 20
},
{
"epoch": 1.0714285714285714,
"grad_norm": 29.5042781829834,
"learning_rate": 6e-06,
"loss": 11.3684,
"step": 30
},
{
"epoch": 1.4285714285714286,
"grad_norm": 29.208715438842773,
"learning_rate": 8.000000000000001e-06,
"loss": 9.8704,
"step": 40
},
{
"epoch": 1.7857142857142856,
"grad_norm": 27.723134994506836,
"learning_rate": 1e-05,
"loss": 7.6666,
"step": 50
},
{
"epoch": 2.142857142857143,
"grad_norm": 18.65027618408203,
"learning_rate": 1.2e-05,
"loss": 5.3314,
"step": 60
},
{
"epoch": 2.5,
"grad_norm": 6.373291015625,
"learning_rate": 1.4000000000000001e-05,
"loss": 3.1671,
"step": 70
},
{
"epoch": 2.857142857142857,
"grad_norm": 2.784041404724121,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.0792,
"step": 80
},
{
"epoch": 3.2142857142857144,
"grad_norm": 1.8078209161758423,
"learning_rate": 1.8e-05,
"loss": 1.4497,
"step": 90
},
{
"epoch": 3.571428571428571,
"grad_norm": 1.2121152877807617,
"learning_rate": 2e-05,
"loss": 0.9399,
"step": 100
},
{
"epoch": 3.928571428571429,
"grad_norm": 0.5274812579154968,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.6974,
"step": 110
},
{
"epoch": 4.285714285714286,
"grad_norm": 0.44586724042892456,
"learning_rate": 2.4e-05,
"loss": 0.5473,
"step": 120
},
{
"epoch": 4.642857142857143,
"grad_norm": 0.3544086813926697,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.5048,
"step": 130
},
{
"epoch": 5.0,
"grad_norm": 0.2498295158147812,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.4708,
"step": 140
},
{
"epoch": 5.357142857142857,
"grad_norm": 0.31819191575050354,
"learning_rate": 3e-05,
"loss": 0.4269,
"step": 150
},
{
"epoch": 5.714285714285714,
"grad_norm": 0.19436120986938477,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.4093,
"step": 160
},
{
"epoch": 6.071428571428571,
"grad_norm": 0.19919425249099731,
"learning_rate": 3.4000000000000007e-05,
"loss": 0.4568,
"step": 170
},
{
"epoch": 6.428571428571429,
"grad_norm": 0.19824030995368958,
"learning_rate": 3.6e-05,
"loss": 0.3915,
"step": 180
},
{
"epoch": 6.785714285714286,
"grad_norm": 0.19597011804580688,
"learning_rate": 3.8e-05,
"loss": 0.4281,
"step": 190
},
{
"epoch": 7.142857142857143,
"grad_norm": 0.19348232448101044,
"learning_rate": 4e-05,
"loss": 0.3779,
"step": 200
},
{
"epoch": 7.142857142857143,
"eval_gen_len": 234.1551,
"eval_loss": 0.38291501998901367,
"eval_rouge1": 0.9021,
"eval_rouge2": 0.8305,
"eval_rougeL": 0.8829,
"eval_runtime": 46.7129,
"eval_samples_per_second": 9.526,
"eval_steps_per_second": 0.15,
"step": 200
},
{
"epoch": 7.5,
"grad_norm": 0.14055049419403076,
"learning_rate": 4.2e-05,
"loss": 0.362,
"step": 210
},
{
"epoch": 7.857142857142857,
"grad_norm": 0.15631766617298126,
"learning_rate": 4.4000000000000006e-05,
"loss": 0.4151,
"step": 220
},
{
"epoch": 8.214285714285714,
"grad_norm": 0.15075279772281647,
"learning_rate": 4.600000000000001e-05,
"loss": 0.3821,
"step": 230
},
{
"epoch": 8.571428571428571,
"grad_norm": 0.1391923427581787,
"learning_rate": 4.8e-05,
"loss": 0.3422,
"step": 240
},
{
"epoch": 8.928571428571429,
"grad_norm": 0.15504033863544464,
"learning_rate": 5e-05,
"loss": 0.3775,
"step": 250
},
{
"epoch": 9.285714285714286,
"grad_norm": 0.14940936863422394,
"learning_rate": 5.2000000000000004e-05,
"loss": 0.3694,
"step": 260
},
{
"epoch": 9.642857142857142,
"grad_norm": 0.12456735968589783,
"learning_rate": 5.4000000000000005e-05,
"loss": 0.3817,
"step": 270
},
{
"epoch": 10.0,
"grad_norm": 0.16763611137866974,
"learning_rate": 5.6000000000000006e-05,
"loss": 0.3245,
"step": 280
}
],
"logging_steps": 10,
"max_steps": 280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 1.08333608730624e+16,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
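
A minimal sketch (not part of the original upload) of how this trainer_state.json can be read back for inspection: it loads the file, splits log_history into training-loss and evaluation entries, and prints the headline numbers. The filename path is an assumption; adjust it to wherever the file lives.

import json

# Assumption: the file above is saved locally as "trainer_state.json".
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"trained for {state['epoch']} epochs, {state['global_step']} steps")
print(f"final training loss: {train_logs[-1]['loss']}")
for e in eval_logs:
    print(
        f"step {e['step']}: eval_loss={e['eval_loss']:.4f}, "
        f"rouge1={e['eval_rouge1']}, rouge2={e['eval_rouge2']}, rougeL={e['eval_rougeL']}"
    )

For this log, the script would report the single evaluation at step 200 (eval_loss ≈ 0.383, ROUGE-1 0.9021) and the final training loss of 0.3245 at step 280.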