{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 252,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11904761904761904,
      "grad_norm": 12.131371063020744,
      "learning_rate": 1.3157894736842106e-06,
      "loss": 0.8467,
      "step": 10
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 2.8702347939026684,
      "learning_rate": 2.631578947368421e-06,
      "loss": 0.7188,
      "step": 20
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 2.5230518976542506,
      "learning_rate": 3.947368421052632e-06,
      "loss": 0.635,
      "step": 30
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 1.6425584319246187,
      "learning_rate": 4.998922515567496e-06,
      "loss": 0.5914,
      "step": 40
    },
    {
      "epoch": 0.5952380952380952,
      "grad_norm": 3.9561567299131952,
      "learning_rate": 4.9613079925074435e-06,
      "loss": 0.5662,
      "step": 50
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 2.8506571881322786,
      "learning_rate": 4.870744433740688e-06,
      "loss": 0.5495,
      "step": 60
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 2.9284294355648997,
      "learning_rate": 4.729180093253106e-06,
      "loss": 0.5354,
      "step": 70
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 1.608687573355936,
      "learning_rate": 4.53966038240406e-06,
      "loss": 0.523,
      "step": 80
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.06461798399686813,
      "eval_runtime": 89.5731,
      "eval_samples_per_second": 202.103,
      "eval_steps_per_second": 0.402,
      "step": 84
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": 3.5277524106115705,
      "learning_rate": 4.306262355332006e-06,
      "loss": 0.5029,
      "step": 90
    },
    {
      "epoch": 1.1904761904761905,
      "grad_norm": 2.9157076219013502,
      "learning_rate": 4.034007001082985e-06,
      "loss": 0.4873,
      "step": 100
    },
    {
      "epoch": 1.3095238095238095,
      "grad_norm": 1.9515685257710755,
      "learning_rate": 3.7287512292828364e-06,
      "loss": 0.4835,
      "step": 110
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 3.0996510453406376,
      "learning_rate": 3.39706187301784e-06,
      "loss": 0.4795,
      "step": 120
    },
    {
      "epoch": 1.5476190476190477,
      "grad_norm": 1.6487179646275694,
      "learning_rate": 3.0460744194443658e-06,
      "loss": 0.4738,
      "step": 130
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 1.6412743986342637,
      "learning_rate": 2.68333950719376e-06,
      "loss": 0.4706,
      "step": 140
    },
    {
      "epoch": 1.7857142857142856,
      "grad_norm": 1.756612697830082,
      "learning_rate": 2.3166604928062407e-06,
      "loss": 0.4675,
      "step": 150
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 1.0971644453746074,
      "learning_rate": 1.9539255805556346e-06,
      "loss": 0.4633,
      "step": 160
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.06034243851900101,
      "eval_runtime": 89.3139,
      "eval_samples_per_second": 202.69,
      "eval_steps_per_second": 0.403,
      "step": 168
    },
    {
      "epoch": 2.0238095238095237,
      "grad_norm": 1.174978789267966,
      "learning_rate": 1.6029381269821607e-06,
      "loss": 0.4564,
      "step": 170
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 0.8542389334158027,
      "learning_rate": 1.2712487707171645e-06,
      "loss": 0.4356,
      "step": 180
    },
    {
      "epoch": 2.261904761904762,
      "grad_norm": 0.7154367298759456,
      "learning_rate": 9.659929989170156e-07,
      "loss": 0.4335,
      "step": 190
    },
    {
      "epoch": 2.380952380952381,
      "grad_norm": 0.5531641628675295,
      "learning_rate": 6.93737644667995e-07,
      "loss": 0.43,
      "step": 200
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.505015673695498,
      "learning_rate": 4.6033961759594045e-07,
      "loss": 0.429,
      "step": 210
    },
    {
      "epoch": 2.619047619047619,
      "grad_norm": 0.48866524545920653,
      "learning_rate": 2.708199067468939e-07,
      "loss": 0.4311,
      "step": 220
    },
    {
      "epoch": 2.738095238095238,
      "grad_norm": 0.46556587701838564,
      "learning_rate": 1.2925556625931174e-07,
      "loss": 0.4303,
      "step": 230
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.45472603633578945,
      "learning_rate": 3.869200749255703e-08,
      "loss": 0.4296,
      "step": 240
    },
    {
      "epoch": 2.9761904761904763,
      "grad_norm": 0.43987009685051254,
      "learning_rate": 1.0774844325039946e-09,
      "loss": 0.4277,
      "step": 250
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.05977506935596466,
      "eval_runtime": 88.5977,
      "eval_samples_per_second": 204.328,
      "eval_steps_per_second": 0.406,
      "step": 252
    },
    {
      "epoch": 3.0,
      "step": 252,
      "total_flos": 3375200049561600.0,
      "train_loss": 0.5070906683566079,
      "train_runtime": 15180.8601,
      "train_samples_per_second": 67.971,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 252,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3375200049561600.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}