{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.568065810991625,
  "eval_steps": 500,
  "global_step": 9000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.19822587838842362,
      "grad_norm": 47.1356201171875,
      "learning_rate": 0.0000024777006937561945,
      "loss": 21.4029,
      "step": 500
    },
    {
      "epoch": 0.19822587838842362,
      "eval_loss": 586.7496948242188,
      "eval_runtime": 573.4198,
      "eval_samples_per_second": 53,
      "eval_steps_per_second": 6.625,
      "step": 500
    },
    {
      "epoch": 0.39645175677684724,
      "grad_norm": 50.89448165893555,
      "learning_rate": 0.000004955401387512389,
      "loss": 11.7925,
      "step": 1000
    },
    {
      "epoch": 0.39645175677684724,
      "eval_loss": 584.6571044921875,
      "eval_runtime": 571.5589,
      "eval_samples_per_second": 53.172,
      "eval_steps_per_second": 6.647,
      "step": 1000
    },
    {
      "epoch": 0.5946776351652708,
      "grad_norm": 56.098140716552734,
      "learning_rate": 0.000007433102081268584,
      "loss": 9.7243,
      "step": 1500
    },
    {
      "epoch": 0.5946776351652708,
      "eval_loss": 438.975830078125,
      "eval_runtime": 571.006,
      "eval_samples_per_second": 53.224,
      "eval_steps_per_second": 6.653,
      "step": 1500
    },
    {
      "epoch": 0.7929035135536945,
      "grad_norm": 49.88700485229492,
      "learning_rate": 0.000009910802775024778,
      "loss": 8.6629,
      "step": 2000
    },
    {
      "epoch": 0.7929035135536945,
      "eval_loss": 443.8860778808594,
      "eval_runtime": 569.8536,
      "eval_samples_per_second": 53.331,
      "eval_steps_per_second": 6.667,
      "step": 2000
    },
    {
      "epoch": 0.991129391942118,
      "grad_norm": 37.3896484375,
      "learning_rate": 0.00000991223686038578,
      "loss": 7.7527,
      "step": 2500
    },
    {
      "epoch": 0.991129391942118,
      "eval_loss": 359.1586608886719,
      "eval_runtime": 570.3107,
      "eval_samples_per_second": 53.289,
      "eval_steps_per_second": 6.661,
      "step": 2500
    },
    {
      "epoch": 1.1893552703305417,
      "grad_norm": 45.05537414550781,
      "learning_rate": 0.000009639072360499928,
      "loss": 7.0532,
      "step": 3000
    },
    {
      "epoch": 1.1893552703305417,
      "eval_loss": 297.4908447265625,
      "eval_runtime": 573.7765,
      "eval_samples_per_second": 52.967,
      "eval_steps_per_second": 6.621,
      "step": 3000
    },
    {
      "epoch": 1.3875811487189653,
      "grad_norm": 42.01622772216797,
      "learning_rate": 0.000009190700553732768,
      "loss": 6.8798,
      "step": 3500
    },
    {
      "epoch": 1.3875811487189653,
      "eval_loss": 272.1216125488281,
      "eval_runtime": 570.5906,
      "eval_samples_per_second": 53.262,
      "eval_steps_per_second": 6.658,
      "step": 3500
    },
    {
      "epoch": 1.5858070271073887,
      "grad_norm": 26.336191177368164,
      "learning_rate": 0.000008584055432624292,
      "loss": 6.5736,
      "step": 4000
    },
    {
      "epoch": 1.5858070271073887,
      "eval_loss": 279.0899353027344,
      "eval_runtime": 578.6986,
      "eval_samples_per_second": 52.516,
      "eval_steps_per_second": 6.565,
      "step": 4000
    },
    {
      "epoch": 1.7840329054958124,
      "grad_norm": 35.772953033447266,
      "learning_rate": 0.00000784204861605915,
      "loss": 6.3985,
      "step": 4500
    },
    {
      "epoch": 1.7840329054958124,
      "eval_loss": 221.53627014160156,
      "eval_runtime": 569.7883,
      "eval_samples_per_second": 53.337,
      "eval_steps_per_second": 6.667,
      "step": 4500
    },
    {
      "epoch": 1.982258783884236,
      "grad_norm": 34.2270622253418,
      "learning_rate": 0.000006992704029062722,
      "loss": 6.0866,
      "step": 5000
    },
    {
      "epoch": 1.982258783884236,
      "eval_loss": 190.98533630371094,
      "eval_runtime": 571.0123,
      "eval_samples_per_second": 53.223,
      "eval_steps_per_second": 6.653,
      "step": 5000
    },
    {
      "epoch": 2.1804846622726597,
      "grad_norm": 34.992889404296875,
      "learning_rate": 0.000006068099502317051,
      "loss": 5.732,
      "step": 5500
    },
    {
      "epoch": 2.1804846622726597,
      "eval_loss": 225.56539916992188,
      "eval_runtime": 570.6646,
      "eval_samples_per_second": 53.255,
      "eval_steps_per_second": 6.657,
      "step": 5500
    },
    {
      "epoch": 2.3787105406610833,
      "grad_norm": 35.05290222167969,
      "learning_rate": 0.000005103155264797376,
      "loss": 5.8384,
      "step": 6000
    },
    {
      "epoch": 2.3787105406610833,
      "eval_loss": 206.36277770996094,
      "eval_runtime": 569.1115,
      "eval_samples_per_second": 53.401,
      "eval_steps_per_second": 6.675,
      "step": 6000
    },
    {
      "epoch": 2.576936419049507,
      "grad_norm": 38.268341064453125,
      "learning_rate": 0.0000041343150854317275,
      "loss": 5.6189,
      "step": 6500
    },
    {
      "epoch": 2.576936419049507,
      "eval_loss": 211.91796875,
      "eval_runtime": 569.7007,
      "eval_samples_per_second": 53.346,
      "eval_steps_per_second": 6.668,
      "step": 6500
    },
    {
      "epoch": 2.7751622974379306,
      "grad_norm": 42.83818054199219,
      "learning_rate": 0.0000031981698740904465,
      "loss": 5.5445,
      "step": 7000
    },
    {
      "epoch": 2.7751622974379306,
      "eval_loss": 199.4452362060547,
      "eval_runtime": 570.9073,
      "eval_samples_per_second": 53.233,
      "eval_steps_per_second": 6.654,
      "step": 7000
    },
    {
      "epoch": 2.9733881758263543,
      "grad_norm": 42.24338150024414,
      "learning_rate": 0.0000023300757253939837,
      "loss": 5.546,
      "step": 7500
    },
    {
      "epoch": 2.9733881758263543,
      "eval_loss": 190.01214599609375,
      "eval_runtime": 571.9686,
      "eval_samples_per_second": 53.134,
      "eval_steps_per_second": 6.642,
      "step": 7500
    },
    {
      "epoch": 3.171614054214778,
      "grad_norm": 34.228065490722656,
      "learning_rate": 0.0000015628185987094685,
      "loss": 5.3668,
      "step": 8000
    },
    {
      "epoch": 3.171614054214778,
      "eval_loss": 194.59649658203125,
      "eval_runtime": 571.773,
      "eval_samples_per_second": 53.152,
      "eval_steps_per_second": 6.644,
      "step": 8000
    },
    {
      "epoch": 3.3698399326032016,
      "grad_norm": 30.128808975219727,
      "learning_rate": 9.253760663628896e-7,
      "loss": 5.338,
      "step": 8500
    },
    {
      "epoch": 3.3698399326032016,
      "eval_loss": 173.66778564453125,
      "eval_runtime": 569.7472,
      "eval_samples_per_second": 53.341,
      "eval_steps_per_second": 6.668,
      "step": 8500
    },
    {
      "epoch": 3.568065810991625,
      "grad_norm": 54.32496643066406,
      "learning_rate": 4.4182289604595254e-7,
      "loss": 5.2961,
      "step": 9000
    },
    {
      "epoch": 3.568065810991625,
      "eval_loss": 184.6847381591797,
      "eval_runtime": 571.6945,
      "eval_samples_per_second": 53.16,
      "eval_steps_per_second": 6.645,
      "step": 9000
    }
  ],
  "logging_steps": 500,
  "max_steps": 10088,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}