{
  "best_metric": 0.8793088793754578,
  "best_model_checkpoint": "models/checkpoints/checkpoint-5500",
  "epoch": 0.4509582863585118,
  "global_step": 5500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 1.998360118071499e-05,
      "loss": 1.518,
      "step": 100
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.9967202361429978e-05,
      "loss": 1.2325,
      "step": 200
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.9950803542144966e-05,
      "loss": 1.1664,
      "step": 300
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.9934404722859955e-05,
      "loss": 1.1746,
      "step": 400
    },
    {
      "epoch": 0.04,
      "learning_rate": 1.9918005903574943e-05,
      "loss": 1.1251,
      "step": 500
    },
    {
      "epoch": 0.04,
      "eval_loss": 0.964438259601593,
      "eval_runtime": 267.7236,
      "eval_samples_per_second": 255.906,
      "eval_steps_per_second": 15.994,
      "step": 500
    },
    {
      "epoch": 0.05,
      "learning_rate": 1.9901607084289934e-05,
      "loss": 1.1041,
      "step": 600
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.988520826500492e-05,
      "loss": 1.0817,
      "step": 700
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.986880944571991e-05,
      "loss": 1.0594,
      "step": 800
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.98524106264349e-05,
      "loss": 1.0642,
      "step": 900
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.9836011807149887e-05,
      "loss": 1.0453,
      "step": 1000
    },
    {
      "epoch": 0.08,
      "eval_loss": 0.9285051226615906,
      "eval_runtime": 253.7626,
      "eval_samples_per_second": 269.985,
      "eval_steps_per_second": 16.874,
      "step": 1000
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9819612987864875e-05,
      "loss": 1.0486,
      "step": 1100
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.9803214168579864e-05,
      "loss": 1.0549,
      "step": 1200
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.9786815349294852e-05,
      "loss": 1.0639,
      "step": 1300
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.977041653000984e-05,
      "loss": 1.0354,
      "step": 1400
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.9754017710724828e-05,
      "loss": 1.0393,
      "step": 1500
    },
    {
      "epoch": 0.12,
      "eval_loss": 0.9202534556388855,
      "eval_runtime": 264.8239,
      "eval_samples_per_second": 258.708,
      "eval_steps_per_second": 16.169,
      "step": 1500
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.9737618891439817e-05,
      "loss": 1.0381,
      "step": 1600
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.9721220072154808e-05,
      "loss": 1.0672,
      "step": 1700
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.9704821252869793e-05,
      "loss": 1.0335,
      "step": 1800
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.9688422433584785e-05,
      "loss": 1.0239,
      "step": 1900
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.9672023614299773e-05,
      "loss": 1.0012,
      "step": 2000
    },
    {
      "epoch": 0.16,
      "eval_loss": 0.9098692536354065,
      "eval_runtime": 264.5198,
      "eval_samples_per_second": 259.005,
      "eval_steps_per_second": 16.188,
      "step": 2000
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.965562479501476e-05,
      "loss": 1.0146,
      "step": 2100
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.963922597572975e-05,
      "loss": 1.0502,
      "step": 2200
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.9622827156444737e-05,
      "loss": 1.0062,
      "step": 2300
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.9606428337159726e-05,
      "loss": 1.0144,
      "step": 2400
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.9590029517874714e-05,
      "loss": 0.9826,
      "step": 2500
    },
    {
      "epoch": 0.2,
      "eval_loss": 0.9166184067726135,
      "eval_runtime": 256.5893,
      "eval_samples_per_second": 267.01,
      "eval_steps_per_second": 16.688,
      "step": 2500
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.9573630698589702e-05,
      "loss": 1.0269,
      "step": 2600
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.9557231879304694e-05,
      "loss": 0.9898,
      "step": 2700
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.954083306001968e-05,
      "loss": 1.0248,
      "step": 2800
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.952443424073467e-05,
      "loss": 0.9996,
      "step": 2900
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.950803542144966e-05,
      "loss": 0.9595,
      "step": 3000
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.9191610217094421,
      "eval_runtime": 272.1388,
      "eval_samples_per_second": 251.754,
      "eval_steps_per_second": 15.735,
      "step": 3000
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.9491636602164647e-05,
      "loss": 0.9873,
      "step": 3100
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.9475237782879635e-05,
      "loss": 0.9667,
      "step": 3200
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.9458838963594623e-05,
      "loss": 0.9666,
      "step": 3300
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.944244014430961e-05,
      "loss": 1.0102,
      "step": 3400
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.94260413250246e-05,
      "loss": 0.9511,
      "step": 3500
    },
    {
      "epoch": 0.29,
      "eval_loss": 0.9013209342956543,
      "eval_runtime": 267.0529,
      "eval_samples_per_second": 256.548,
      "eval_steps_per_second": 16.034,
      "step": 3500
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.9409642505739588e-05,
      "loss": 0.9927,
      "step": 3600
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.9393243686454576e-05,
      "loss": 0.9507,
      "step": 3700
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.9376844867169567e-05,
      "loss": 1.0049,
      "step": 3800
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.9360446047884552e-05,
      "loss": 0.9385,
      "step": 3900
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.9344047228599544e-05,
      "loss": 0.9644,
      "step": 4000
    },
    {
      "epoch": 0.33,
      "eval_loss": 0.8957135081291199,
      "eval_runtime": 260.7988,
      "eval_samples_per_second": 262.701,
      "eval_steps_per_second": 16.419,
      "step": 4000
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.932764840931453e-05,
      "loss": 0.9293,
      "step": 4100
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.931124959002952e-05,
      "loss": 0.9298,
      "step": 4200
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.929485077074451e-05,
      "loss": 0.9218,
      "step": 4300
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.9278451951459497e-05,
      "loss": 0.9523,
      "step": 4400
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.9262053132174485e-05,
      "loss": 0.9079,
      "step": 4500
    },
    {
      "epoch": 0.37,
      "eval_loss": 0.9050074815750122,
      "eval_runtime": 264.4439,
      "eval_samples_per_second": 259.079,
      "eval_steps_per_second": 16.192,
      "step": 4500
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.9245654312889473e-05,
      "loss": 0.9437,
      "step": 4600
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.922925549360446e-05,
      "loss": 0.9395,
      "step": 4700
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.921285667431945e-05,
      "loss": 0.969,
      "step": 4800
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.9196457855034438e-05,
      "loss": 0.926,
      "step": 4900
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.9180059035749426e-05,
      "loss": 0.9178,
      "step": 5000
    },
    {
      "epoch": 0.41,
      "eval_loss": 0.8956226706504822,
      "eval_runtime": 259.9134,
      "eval_samples_per_second": 263.595,
      "eval_steps_per_second": 16.475,
      "step": 5000
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.9163660216464418e-05,
      "loss": 0.9697,
      "step": 5100
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.9147261397179403e-05,
      "loss": 0.9448,
      "step": 5200
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.9130862577894394e-05,
      "loss": 0.9118,
      "step": 5300
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.911446375860938e-05,
      "loss": 0.9321,
      "step": 5400
    },
    {
      "epoch": 0.45,
      "learning_rate": 1.909806493932437e-05,
      "loss": 0.9289,
      "step": 5500
    },
    {
      "epoch": 0.45,
      "eval_loss": 0.8793088793754578,
      "eval_runtime": 261.4397,
      "eval_samples_per_second": 262.057,
      "eval_steps_per_second": 16.379,
      "step": 5500
    }
  ],
  "max_steps": 121960,
  "num_train_epochs": 10,
  "total_flos": 1.922426903232e+16,
  "trial_name": null,
  "trial_params": null
}