|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 59.922928709055874,
  "eval_steps": 1866,
  "global_step": 18660,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.992292870905588,
      "grad_norm": 1.8218445777893066,
      "learning_rate": 9.002143622722401e-06,
      "loss": 2.4434,
      "step": 1866
    },
    {
      "epoch": 5.992292870905588,
      "eval_accuracy": 0.03163401873707264,
      "eval_loss": 3.7035000324249268,
      "eval_runtime": 31.9522,
      "eval_samples_per_second": 257.228,
      "eval_steps_per_second": 6.447,
      "step": 1866
    },
    {
      "epoch": 11.984585741811175,
      "grad_norm": 4.5330424308776855,
      "learning_rate": 8.0021436227224e-06,
      "loss": 2.2689,
      "step": 3732
    },
    {
      "epoch": 11.984585741811175,
      "eval_accuracy": 0.031208176177150503,
      "eval_loss": 3.9282093048095703,
      "eval_runtime": 32.0567,
      "eval_samples_per_second": 256.39,
      "eval_steps_per_second": 6.426,
      "step": 3732
    },
    {
      "epoch": 17.976878612716764,
      "grad_norm": 7.768856048583984,
      "learning_rate": 7.002143622722402e-06,
      "loss": 2.1311,
      "step": 5598
    },
    {
      "epoch": 17.976878612716764,
      "eval_accuracy": 0.0324045909883603,
      "eval_loss": 4.188955307006836,
      "eval_runtime": 31.9998,
      "eval_samples_per_second": 256.845,
      "eval_steps_per_second": 6.438,
      "step": 5598
    },
    {
      "epoch": 23.96917148362235,
      "grad_norm": 4.701458930969238,
      "learning_rate": 6.002679528403002e-06,
      "loss": 2.0473,
      "step": 7464
    },
    {
      "epoch": 23.96917148362235,
      "eval_accuracy": 0.03166443606278136,
      "eval_loss": 4.221826076507568,
      "eval_runtime": 31.9366,
      "eval_samples_per_second": 257.353,
      "eval_steps_per_second": 6.45,
      "step": 7464
    },
    {
      "epoch": 29.961464354527937,
      "grad_norm": 4.455111503601074,
      "learning_rate": 5.003215434083602e-06,
      "loss": 2.0065,
      "step": 9330
    },
    {
      "epoch": 29.961464354527937,
      "eval_accuracy": 0.03170702031877357,
      "eval_loss": 4.196821212768555,
      "eval_runtime": 31.9205,
      "eval_samples_per_second": 257.484,
      "eval_steps_per_second": 6.454,
      "step": 9330
    },
    {
      "epoch": 35.95375722543353,
      "grad_norm": 4.213284492492676,
      "learning_rate": 4.003751339764202e-06,
      "loss": 1.9816,
      "step": 11196
    },
    {
      "epoch": 35.95375722543353,
      "eval_accuracy": 0.031127063308593907,
      "eval_loss": 4.327696323394775,
      "eval_runtime": 32.0629,
      "eval_samples_per_second": 256.34,
      "eval_steps_per_second": 6.425,
      "step": 11196
    },
    {
      "epoch": 41.946050096339114,
      "grad_norm": 4.925486087799072,
      "learning_rate": 3.004287245444802e-06,
      "loss": 1.9593,
      "step": 13062
    },
    {
      "epoch": 41.946050096339114,
      "eval_accuracy": 0.031164722854709472,
      "eval_loss": 4.440001487731934,
      "eval_runtime": 31.9997,
      "eval_samples_per_second": 256.846,
      "eval_steps_per_second": 6.438,
      "step": 13062
    },
    {
      "epoch": 47.9383429672447,
      "grad_norm": 3.112938404083252,
      "learning_rate": 2.004823151125402e-06,
      "loss": 1.9448,
      "step": 14928
    },
    {
      "epoch": 47.9383429672447,
      "eval_accuracy": 0.031116924200024335,
      "eval_loss": 4.489612102508545,
      "eval_runtime": 31.9592,
      "eval_samples_per_second": 257.172,
      "eval_steps_per_second": 6.446,
      "step": 14928
    },
    {
      "epoch": 53.93063583815029,
      "grad_norm": 1.5867228507995605,
      "learning_rate": 1.005359056806002e-06,
      "loss": 1.9352,
      "step": 16794
    },
    {
      "epoch": 53.93063583815029,
      "eval_accuracy": 0.03109326628002866,
      "eval_loss": 4.571017265319824,
      "eval_runtime": 31.9814,
      "eval_samples_per_second": 256.993,
      "eval_steps_per_second": 6.441,
      "step": 16794
    },
    {
      "epoch": 59.922928709055874,
      "grad_norm": 4.193638801574707,
      "learning_rate": 5.894962486602358e-09,
      "loss": 1.9342,
      "step": 18660
    },
    {
      "epoch": 59.922928709055874,
      "eval_accuracy": 0.031013505292614672,
      "eval_loss": 4.6299943923950195,
      "eval_runtime": 31.9345,
      "eval_samples_per_second": 257.37,
      "eval_steps_per_second": 6.451,
      "step": 18660
    },
    {
      "epoch": 59.922928709055874,
      "step": 18660,
      "total_flos": 1.4737892295759744e+18,
      "train_loss": 2.0652262108118973,
      "train_runtime": 33416.7253,
      "train_samples_per_second": 111.779,
      "train_steps_per_second": 0.558
    }
  ],
  "logging_steps": 1866,
  "max_steps": 18660,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 60,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4737892295759744e+18,
  "train_batch_size": 40,
  "trial_name": null,
  "trial_params": null
}
|
|