reward-gpt-b6 / checkpoint-500 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0712900650165393,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 8.999954848325688e-06,
"loss": 0.7277,
"step": 10
},
{
"epoch": 0.0,
"learning_rate": 8.999819394208827e-06,
"loss": 0.6455,
"step": 20
},
{
"epoch": 0.0,
"learning_rate": 8.999593640367633e-06,
"loss": 0.5428,
"step": 30
},
{
"epoch": 0.01,
"learning_rate": 8.9992775913324e-06,
"loss": 0.3486,
"step": 40
},
{
"epoch": 0.01,
"learning_rate": 8.998871253445414e-06,
"loss": 0.3524,
"step": 50
},
{
"epoch": 0.01,
"learning_rate": 8.998374634860822e-06,
"loss": 0.2701,
"step": 60
},
{
"epoch": 0.01,
"learning_rate": 8.997787745544478e-06,
"loss": 0.2106,
"step": 70
},
{
"epoch": 0.01,
"learning_rate": 8.997110597273727e-06,
"loss": 0.2596,
"step": 80
},
{
"epoch": 0.01,
"learning_rate": 8.996343203637181e-06,
"loss": 0.2015,
"step": 90
},
{
"epoch": 0.01,
"learning_rate": 8.995485580034448e-06,
"loss": 0.2462,
"step": 100
},
{
"epoch": 0.02,
"learning_rate": 8.99453774367581e-06,
"loss": 0.2172,
"step": 110
},
{
"epoch": 0.02,
"learning_rate": 8.993499713581887e-06,
"loss": 0.2655,
"step": 120
},
{
"epoch": 0.02,
"learning_rate": 8.992371510583257e-06,
"loss": 0.1823,
"step": 130
},
{
"epoch": 0.02,
"learning_rate": 8.991153157320033e-06,
"loss": 0.227,
"step": 140
},
{
"epoch": 0.02,
"learning_rate": 8.989844678241412e-06,
"loss": 0.2243,
"step": 150
},
{
"epoch": 0.02,
"learning_rate": 8.988446099605179e-06,
"loss": 0.1787,
"step": 160
},
{
"epoch": 0.02,
"learning_rate": 8.986957449477188e-06,
"loss": 0.2211,
"step": 170
},
{
"epoch": 0.03,
"learning_rate": 8.98537875773079e-06,
"loss": 0.1683,
"step": 180
},
{
"epoch": 0.03,
"learning_rate": 8.983710056046243e-06,
"loss": 0.1879,
"step": 190
},
{
"epoch": 0.03,
"learning_rate": 8.98195137791007e-06,
"loss": 0.1809,
"step": 200
},
{
"epoch": 0.03,
"learning_rate": 8.980102758614384e-06,
"loss": 0.2204,
"step": 210
},
{
"epoch": 0.03,
"learning_rate": 8.978164235256191e-06,
"loss": 0.189,
"step": 220
},
{
"epoch": 0.03,
"learning_rate": 8.976135846736634e-06,
"loss": 0.1389,
"step": 230
},
{
"epoch": 0.03,
"learning_rate": 8.97401763376022e-06,
"loss": 0.1231,
"step": 240
},
{
"epoch": 0.04,
"learning_rate": 8.971809638833998e-06,
"loss": 0.2018,
"step": 250
},
{
"epoch": 0.04,
"learning_rate": 8.96951190626671e-06,
"loss": 0.1271,
"step": 260
},
{
"epoch": 0.04,
"learning_rate": 8.967124482167896e-06,
"loss": 0.1364,
"step": 270
},
{
"epoch": 0.04,
"learning_rate": 8.964647414446981e-06,
"loss": 0.1658,
"step": 280
},
{
"epoch": 0.04,
"learning_rate": 8.9620807528123e-06,
"loss": 0.1479,
"step": 290
},
{
"epoch": 0.04,
"learning_rate": 8.959424548770104e-06,
"loss": 0.1353,
"step": 300
},
{
"epoch": 0.04,
"learning_rate": 8.95667885562353e-06,
"loss": 0.1761,
"step": 310
},
{
"epoch": 0.05,
"learning_rate": 8.953843728471533e-06,
"loss": 0.1391,
"step": 320
},
{
"epoch": 0.05,
"learning_rate": 8.950919224207774e-06,
"loss": 0.1776,
"step": 330
},
{
"epoch": 0.05,
"learning_rate": 8.94790540151948e-06,
"loss": 0.2089,
"step": 340
},
{
"epoch": 0.05,
"learning_rate": 8.94480232088627e-06,
"loss": 0.1368,
"step": 350
},
{
"epoch": 0.05,
"learning_rate": 8.941610044578937e-06,
"loss": 0.1501,
"step": 360
},
{
"epoch": 0.05,
"learning_rate": 8.938328636658202e-06,
"loss": 0.1807,
"step": 370
},
{
"epoch": 0.05,
"learning_rate": 8.934958162973425e-06,
"loss": 0.13,
"step": 380
},
{
"epoch": 0.06,
"learning_rate": 8.931498691161287e-06,
"loss": 0.1437,
"step": 390
},
{
"epoch": 0.06,
"learning_rate": 8.92795029064443e-06,
"loss": 0.0828,
"step": 400
},
{
"epoch": 0.06,
"learning_rate": 8.924313032630063e-06,
"loss": 0.1041,
"step": 410
},
{
"epoch": 0.06,
"learning_rate": 8.920586990108539e-06,
"loss": 0.2663,
"step": 420
},
{
"epoch": 0.06,
"learning_rate": 8.916772237851882e-06,
"loss": 0.2055,
"step": 430
},
{
"epoch": 0.06,
"learning_rate": 8.912868852412294e-06,
"loss": 0.151,
"step": 440
},
{
"epoch": 0.06,
"learning_rate": 8.908876912120614e-06,
"loss": 0.1638,
"step": 450
},
{
"epoch": 0.07,
"learning_rate": 8.904796497084747e-06,
"loss": 0.1399,
"step": 460
},
{
"epoch": 0.07,
"learning_rate": 8.900627689188059e-06,
"loss": 0.1615,
"step": 470
},
{
"epoch": 0.07,
"learning_rate": 8.89637057208773e-06,
"loss": 0.1209,
"step": 480
},
{
"epoch": 0.07,
"learning_rate": 8.892025231213077e-06,
"loss": 0.1605,
"step": 490
},
{
"epoch": 0.07,
"learning_rate": 8.887591753763842e-06,
"loss": 0.124,
"step": 500
},
{
"epoch": 0.07,
"eval_accuracy": 0.0,
"eval_loss": 0.13451002538204193,
"eval_runtime": 446.6893,
"eval_samples_per_second": 52.338,
"eval_steps_per_second": 8.724,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 7013,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 0.0,
"trial_name": null,
"trial_params": null
}
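
The JSON above follows the Hugging Face Trainer's trainer_state.json layout: log_history holds one entry per logging step (keyed by "loss") and one entry per evaluation (keyed by "eval_loss"). The following is only a minimal sketch of reading that state, assuming the file has been saved locally as trainer_state.json (the path is an assumption of this sketch, not part of the checkpoint):

import json

# Load the checkpoint's trainer state (local path is illustrative).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"progress: step {state['global_step']} of {state['max_steps']}")
print(f"last train loss (step {train_logs[-1]['step']}): {train_logs[-1]['loss']}")
if eval_logs:
    last_eval = eval_logs[-1]
    print(f"eval_loss at step {last_eval['step']}: {last_eval['eval_loss']}")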