Commit 5566df4 (verified): Training in progress, epoch 0, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.1662510390689942,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0008312551953449709,
"eval_loss": 1.7031567096710205,
"eval_runtime": 38.9167,
"eval_samples_per_second": 13.028,
"eval_steps_per_second": 6.527,
"step": 1
},
{
"epoch": 0.004156275976724855,
"grad_norm": 8.313850402832031,
"learning_rate": 5e-05,
"loss": 5.5336,
"step": 5
},
{
"epoch": 0.00831255195344971,
"grad_norm": 11.550032615661621,
"learning_rate": 0.0001,
"loss": 4.8425,
"step": 10
},
{
"epoch": 0.012468827930174564,
"grad_norm": 15.0049467086792,
"learning_rate": 9.98292246503335e-05,
"loss": 4.3692,
"step": 15
},
{
"epoch": 0.01662510390689942,
"grad_norm": 13.11844253540039,
"learning_rate": 9.931806517013612e-05,
"loss": 4.9362,
"step": 20
},
{
"epoch": 0.020781379883624274,
"grad_norm": 12.807136535644531,
"learning_rate": 9.847001329696653e-05,
"loss": 4.7307,
"step": 25
},
{
"epoch": 0.02493765586034913,
"grad_norm": 13.963868141174316,
"learning_rate": 9.729086208503174e-05,
"loss": 4.3436,
"step": 30
},
{
"epoch": 0.029093931837073983,
"grad_norm": 15.964492797851562,
"learning_rate": 9.578866633275288e-05,
"loss": 5.1,
"step": 35
},
{
"epoch": 0.03325020781379884,
"grad_norm": 14.075353622436523,
"learning_rate": 9.397368756032445e-05,
"loss": 3.9034,
"step": 40
},
{
"epoch": 0.03740648379052369,
"grad_norm": 19.31031608581543,
"learning_rate": 9.185832391312644e-05,
"loss": 4.8119,
"step": 45
},
{
"epoch": 0.04156275976724855,
"grad_norm": 16.785146713256836,
"learning_rate": 8.945702546981969e-05,
"loss": 5.285,
"step": 50
},
{
"epoch": 0.04156275976724855,
"eval_loss": 1.1468664407730103,
"eval_runtime": 39.3179,
"eval_samples_per_second": 12.895,
"eval_steps_per_second": 6.46,
"step": 50
},
{
"epoch": 0.0457190357439734,
"grad_norm": 8.89350700378418,
"learning_rate": 8.678619553365659e-05,
"loss": 4.4179,
"step": 55
},
{
"epoch": 0.04987531172069826,
"grad_norm": 9.902567863464355,
"learning_rate": 8.386407858128706e-05,
"loss": 4.1408,
"step": 60
},
{
"epoch": 0.05403158769742311,
"grad_norm": 9.637911796569824,
"learning_rate": 8.07106356344834e-05,
"loss": 4.3677,
"step": 65
},
{
"epoch": 0.058187863674147966,
"grad_norm": 11.88183307647705,
"learning_rate": 7.734740790612136e-05,
"loss": 4.5824,
"step": 70
},
{
"epoch": 0.06234413965087282,
"grad_norm": 13.11361026763916,
"learning_rate": 7.379736965185368e-05,
"loss": 4.5274,
"step": 75
},
{
"epoch": 0.06650041562759768,
"grad_norm": 10.361695289611816,
"learning_rate": 7.008477123264848e-05,
"loss": 4.5816,
"step": 80
},
{
"epoch": 0.07065669160432253,
"grad_norm": 13.685104370117188,
"learning_rate": 6.623497346023418e-05,
"loss": 4.2433,
"step": 85
},
{
"epoch": 0.07481296758104738,
"grad_norm": 14.716741561889648,
"learning_rate": 6.227427435703997e-05,
"loss": 5.1209,
"step": 90
},
{
"epoch": 0.07896924355777224,
"grad_norm": 16.79210662841797,
"learning_rate": 5.8229729514036705e-05,
"loss": 5.0024,
"step": 95
},
{
"epoch": 0.0831255195344971,
"grad_norm": 15.1632080078125,
"learning_rate": 5.4128967273616625e-05,
"loss": 5.2588,
"step": 100
},
{
"epoch": 0.0831255195344971,
"eval_loss": 1.1230735778808594,
"eval_runtime": 39.4665,
"eval_samples_per_second": 12.846,
"eval_steps_per_second": 6.436,
"step": 100
},
{
"epoch": 0.08728179551122195,
"grad_norm": 9.77318286895752,
"learning_rate": 5e-05,
"loss": 4.0232,
"step": 105
},
{
"epoch": 0.0914380714879468,
"grad_norm": 10.034825325012207,
"learning_rate": 4.5871032726383386e-05,
"loss": 4.1443,
"step": 110
},
{
"epoch": 0.09559434746467166,
"grad_norm": 9.069893836975098,
"learning_rate": 4.17702704859633e-05,
"loss": 4.2779,
"step": 115
},
{
"epoch": 0.09975062344139651,
"grad_norm": 13.375626564025879,
"learning_rate": 3.772572564296005e-05,
"loss": 4.5112,
"step": 120
},
{
"epoch": 0.10390689941812137,
"grad_norm": 10.194912910461426,
"learning_rate": 3.3765026539765834e-05,
"loss": 4.295,
"step": 125
},
{
"epoch": 0.10806317539484622,
"grad_norm": 11.304728507995605,
"learning_rate": 2.991522876735154e-05,
"loss": 4.1605,
"step": 130
},
{
"epoch": 0.11221945137157108,
"grad_norm": 9.311614990234375,
"learning_rate": 2.6202630348146324e-05,
"loss": 4.3502,
"step": 135
},
{
"epoch": 0.11637572734829593,
"grad_norm": 21.666893005371094,
"learning_rate": 2.2652592093878666e-05,
"loss": 4.6441,
"step": 140
},
{
"epoch": 0.12053200332502079,
"grad_norm": 13.352088928222656,
"learning_rate": 1.928936436551661e-05,
"loss": 4.7608,
"step": 145
},
{
"epoch": 0.12468827930174564,
"grad_norm": 14.888147354125977,
"learning_rate": 1.6135921418712956e-05,
"loss": 4.6675,
"step": 150
},
{
"epoch": 0.12468827930174564,
"eval_loss": 1.1012681722640991,
"eval_runtime": 39.5131,
"eval_samples_per_second": 12.831,
"eval_steps_per_second": 6.428,
"step": 150
},
{
"epoch": 0.1288445552784705,
"grad_norm": 10.447840690612793,
"learning_rate": 1.3213804466343421e-05,
"loss": 4.186,
"step": 155
},
{
"epoch": 0.13300083125519535,
"grad_norm": 13.874579429626465,
"learning_rate": 1.0542974530180327e-05,
"loss": 4.3065,
"step": 160
},
{
"epoch": 0.1371571072319202,
"grad_norm": 10.623826026916504,
"learning_rate": 8.141676086873572e-06,
"loss": 3.7722,
"step": 165
},
{
"epoch": 0.14131338320864506,
"grad_norm": 11.924473762512207,
"learning_rate": 6.026312439675552e-06,
"loss": 4.3879,
"step": 170
},
{
"epoch": 0.14546965918536992,
"grad_norm": 16.149940490722656,
"learning_rate": 4.2113336672471245e-06,
"loss": 4.9389,
"step": 175
},
{
"epoch": 0.14962593516209477,
"grad_norm": 11.651994705200195,
"learning_rate": 2.7091379149682685e-06,
"loss": 4.0122,
"step": 180
},
{
"epoch": 0.15378221113881962,
"grad_norm": 10.336478233337402,
"learning_rate": 1.5299867030334814e-06,
"loss": 4.6848,
"step": 185
},
{
"epoch": 0.15793848711554448,
"grad_norm": 12.979530334472656,
"learning_rate": 6.819348298638839e-07,
"loss": 4.2951,
"step": 190
},
{
"epoch": 0.16209476309226933,
"grad_norm": 14.301395416259766,
"learning_rate": 1.7077534966650766e-07,
"loss": 4.8291,
"step": 195
},
{
"epoch": 0.1662510390689942,
"grad_norm": 17.349889755249023,
"learning_rate": 0.0,
"loss": 5.4328,
"step": 200
},
{
"epoch": 0.1662510390689942,
"eval_loss": 1.1004496812820435,
"eval_runtime": 39.5297,
"eval_samples_per_second": 12.826,
"eval_steps_per_second": 6.426,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.01377152024576e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
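
The block above follows the trainer_state.json layout that the Hugging Face Transformers Trainer writes into each checkpoint directory: per-step training losses and periodic eval_loss entries are collected in log_history, alongside the final global_step, epoch, and callback state. A minimal sketch for inspecting it with the standard library, assuming the file has been saved locally as "trainer_state.json" (the filename is an assumption of this example):

# Minimal sketch: summarize a Trainer checkpoint state file.
# Assumes the JSON above is stored locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (with "loss") and evaluation
# entries (with "eval_loss"); split them by key.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}  epoch={state['epoch']:.4f}")
for e in eval_logs:
    print(f"step {e['step']:>4}: eval_loss={e['eval_loss']:.4f}")

print("last logged train loss:", train_logs[-1]["loss"])
print("best eval loss so far:", min(e["eval_loss"] for e in eval_logs))

With the values recorded above, this prints the eval loss falling from roughly 1.703 at step 1 to 1.100 at step 200, while the training loss fluctuates around 4 to 5 over the 200 logged steps.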