ZenAI-v2/last-checkpoint/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.21079258010118043,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.1470233384070705e-06,
"loss": 1.4705,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 5.4552635448851985e-06,
"loss": 1.5012,
"step": 20
},
{
"epoch": 0.01,
"learning_rate": 6.289174995846284e-06,
"loss": 1.385,
"step": 30
},
{
"epoch": 0.02,
"learning_rate": 6.865549773769684e-06,
"loss": 1.2729,
"step": 40
},
{
"epoch": 0.02,
"learning_rate": 7.306472495116047e-06,
"loss": 1.2978,
"step": 50
},
{
"epoch": 0.03,
"learning_rate": 7.663646266610644e-06,
"loss": 1.4354,
"step": 60
},
{
"epoch": 0.03,
"learning_rate": 7.963863644365277e-06,
"loss": 1.4443,
"step": 70
},
{
"epoch": 0.03,
"learning_rate": 8.222815896602431e-06,
"loss": 1.5108,
"step": 80
},
{
"epoch": 0.04,
"learning_rate": 8.450488154497406e-06,
"loss": 1.3964,
"step": 90
},
{
"epoch": 0.04,
"learning_rate": 8.653629372258186e-06,
"loss": 1.2203,
"step": 100
},
{
"epoch": 0.05,
"learning_rate": 8.837015420566862e-06,
"loss": 1.3174,
"step": 110
},
{
"epoch": 0.05,
"learning_rate": 9.004150364012388e-06,
"loss": 1.1764,
"step": 120
},
{
"epoch": 0.05,
"learning_rate": 9.157681445346895e-06,
"loss": 1.1009,
"step": 130
},
{
"epoch": 0.06,
"learning_rate": 9.299657503090295e-06,
"loss": 1.0422,
"step": 140
},
{
"epoch": 0.06,
"learning_rate": 9.43169675964485e-06,
"loss": 1.0693,
"step": 150
},
{
"epoch": 0.07,
"learning_rate": 9.555099634066188e-06,
"loss": 0.9296,
"step": 160
},
{
"epoch": 0.07,
"learning_rate": 9.670926871527948e-06,
"loss": 1.0445,
"step": 170
},
{
"epoch": 0.08,
"learning_rate": 9.780055047302923e-06,
"loss": 1.008,
"step": 180
},
{
"epoch": 0.08,
"learning_rate": 9.883216879176116e-06,
"loss": 0.9062,
"step": 190
},
{
"epoch": 0.08,
"learning_rate": 9.98103107994822e-06,
"loss": 1.0707,
"step": 200
},
{
"epoch": 0.09,
"learning_rate": 1e-05,
"loss": 1.0204,
"step": 210
},
{
"epoch": 0.09,
"learning_rate": 1e-05,
"loss": 0.8036,
"step": 220
},
{
"epoch": 0.1,
"learning_rate": 1e-05,
"loss": 0.9934,
"step": 230
},
{
"epoch": 0.1,
"learning_rate": 1e-05,
"loss": 0.9144,
"step": 240
},
{
"epoch": 0.11,
"learning_rate": 1e-05,
"loss": 0.8351,
"step": 250
},
{
"epoch": 0.11,
"learning_rate": 1e-05,
"loss": 0.8797,
"step": 260
},
{
"epoch": 0.11,
"learning_rate": 1e-05,
"loss": 0.7679,
"step": 270
},
{
"epoch": 0.12,
"learning_rate": 1e-05,
"loss": 0.9388,
"step": 280
},
{
"epoch": 0.12,
"learning_rate": 1e-05,
"loss": 0.9333,
"step": 290
},
{
"epoch": 0.13,
"learning_rate": 1e-05,
"loss": 0.8698,
"step": 300
},
{
"epoch": 0.13,
"learning_rate": 1e-05,
"loss": 0.7842,
"step": 310
},
{
"epoch": 0.13,
"learning_rate": 1e-05,
"loss": 0.8371,
"step": 320
},
{
"epoch": 0.14,
"learning_rate": 1e-05,
"loss": 0.7694,
"step": 330
},
{
"epoch": 0.14,
"learning_rate": 1e-05,
"loss": 0.8426,
"step": 340
},
{
"epoch": 0.15,
"learning_rate": 1e-05,
"loss": 0.7972,
"step": 350
},
{
"epoch": 0.15,
"learning_rate": 1e-05,
"loss": 0.8183,
"step": 360
},
{
"epoch": 0.16,
"learning_rate": 1e-05,
"loss": 0.7462,
"step": 370
},
{
"epoch": 0.16,
"learning_rate": 1e-05,
"loss": 0.7919,
"step": 380
},
{
"epoch": 0.16,
"learning_rate": 1e-05,
"loss": 0.8173,
"step": 390
},
{
"epoch": 0.17,
"learning_rate": 1e-05,
"loss": 0.7654,
"step": 400
},
{
"epoch": 0.17,
"learning_rate": 1e-05,
"loss": 0.7601,
"step": 410
},
{
"epoch": 0.18,
"learning_rate": 1e-05,
"loss": 0.788,
"step": 420
},
{
"epoch": 0.18,
"learning_rate": 1e-05,
"loss": 0.7488,
"step": 430
},
{
"epoch": 0.19,
"learning_rate": 1e-05,
"loss": 0.7721,
"step": 440
},
{
"epoch": 0.19,
"learning_rate": 1e-05,
"loss": 0.8289,
"step": 450
},
{
"epoch": 0.19,
"learning_rate": 1e-05,
"loss": 0.7367,
"step": 460
},
{
"epoch": 0.2,
"learning_rate": 1e-05,
"loss": 0.8422,
"step": 470
},
{
"epoch": 0.2,
"learning_rate": 1e-05,
"loss": 0.7383,
"step": 480
},
{
"epoch": 0.21,
"learning_rate": 1e-05,
"loss": 0.7729,
"step": 490
},
{
"epoch": 0.21,
"learning_rate": 1e-05,
"loss": 0.7537,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 5000,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 125136682352640.0,
"trial_name": null,
"trial_params": null
}
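
A note on reading this checkpoint state: log_history holds one entry per logging_steps (10) optimizer steps, up to global_step 500, and the top-level fields record the save cadence and training horizon. Below is a minimal sketch for loading the file and summarizing the loss curve with Python's standard json module; the relative file path is an assumption based on the checkpoint directory shown above, not something the file itself specifies.

import json

# Hypothetical path, matching the checkpoint layout shown above.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]
print(f"logged entries: {len(history)}, last global step: {state['global_step']}")
print(f"first loss: {history[0]['loss']:.4f}, last loss: {history[-1]['loss']:.4f}")

# Average loss over the final 10 logged entries (steps 410-500).
tail = [entry["loss"] for entry in history[-10:]]
print(f"mean loss over last 10 logs: {sum(tail) / len(tail):.4f}")

With this file, the script reports 50 logged entries and shows the training loss falling from roughly 1.47 at step 10 to about 0.75 by step 500.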