|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6699636307656782,
  "eval_steps": 500,
  "global_step": 272,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.999969914479768e-05,
      "loss": 1.1295,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.9992479525042305e-05,
      "loss": 1.1199,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.996992941167792e-05,
      "loss": 1.1063,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 1.9932383577419432e-05,
      "loss": 1.0914,
      "step": 15
    },
    {
      "epoch": 0.05,
      "learning_rate": 1.9879898494768093e-05,
      "loss": 1.094,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.9812553106273848e-05,
      "loss": 1.0658,
      "step": 25
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.973044870579824e-05,
      "loss": 1.0527,
      "step": 30
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9633708786158803e-05,
      "loss": 1.0503,
      "step": 35
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.9522478853384154e-05,
      "loss": 1.0305,
      "step": 40
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 1.0268,
      "step": 45
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.9257239692688907e-05,
      "loss": 1.0291,
      "step": 50
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.9103629409661468e-05,
      "loss": 1.02,
      "step": 55
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.8936326403234125e-05,
      "loss": 1.0127,
      "step": 60
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.8755582313020912e-05,
      "loss": 1.0078,
      "step": 65
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.8561668995302668e-05,
      "loss": 1.0003,
      "step": 70
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.8354878114129368e-05,
      "loss": 1.0081,
      "step": 75
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.8135520702629677e-05,
      "loss": 1.0157,
      "step": 80
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.7903926695187595e-05,
      "loss": 1.0118,
      "step": 85
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.766044443118978e-05,
      "loss": 1.0011,
      "step": 90
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.740544013109005e-05,
      "loss": 0.9919,
      "step": 95
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.7139297345578992e-05,
      "loss": 1.0025,
      "step": 100
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.686241637868734e-05,
      "loss": 1.0187,
      "step": 105
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.657521368569064e-05,
      "loss": 0.9959,
      "step": 110
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.627812124672099e-05,
      "loss": 0.9936,
      "step": 115
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.5971585917027864e-05,
      "loss": 0.9981,
      "step": 120
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.5656068754865388e-05,
      "loss": 0.9954,
      "step": 125
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.5332044328016916e-05,
      "loss": 1.0044,
      "step": 130
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 1.0032,
      "step": 135
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.4660435197025391e-05,
      "loss": 0.9901,
      "step": 140
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.4313860656812537e-05,
      "loss": 0.9965,
      "step": 145
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.396079766039157e-05,
      "loss": 0.9915,
      "step": 150
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.3601777248047105e-05,
      "loss": 0.9861,
      "step": 155
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.3237339420583213e-05,
      "loss": 0.9757,
      "step": 160
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.2868032327110904e-05,
      "loss": 0.9944,
      "step": 165
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.2494411440579814e-05,
      "loss": 0.9963,
      "step": 170
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.211703872229411e-05,
      "loss": 0.9817,
      "step": 175
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.9881,
      "step": 180
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.1353312997501313e-05,
      "loss": 0.9965,
      "step": 185
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.0968108707031792e-05,
      "loss": 0.9921,
      "step": 190
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.0581448289104759e-05,
      "loss": 0.9934,
      "step": 195
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.0193913317718245e-05,
      "loss": 0.9823,
      "step": 200
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.806086682281759e-06,
      "loss": 0.988,
      "step": 205
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.418551710895243e-06,
      "loss": 0.9866,
      "step": 210
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.03189129296821e-06,
      "loss": 0.9983,
      "step": 215
    },
    {
      "epoch": 0.54,
      "learning_rate": 8.646687002498692e-06,
      "loss": 0.9866,
      "step": 220
    },
    {
      "epoch": 0.55,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.9918,
      "step": 225
    },
    {
      "epoch": 0.57,
      "learning_rate": 7.882961277705897e-06,
      "loss": 0.9901,
      "step": 230
    },
    {
      "epoch": 0.58,
      "learning_rate": 7.505588559420188e-06,
      "loss": 0.987,
      "step": 235
    },
    {
      "epoch": 0.59,
      "learning_rate": 7.131967672889101e-06,
      "loss": 0.9941,
      "step": 240
    },
    {
      "epoch": 0.6,
      "learning_rate": 6.762660579416791e-06,
      "loss": 0.9968,
      "step": 245
    },
    {
      "epoch": 0.62,
      "learning_rate": 6.3982227519528986e-06,
      "loss": 0.9842,
      "step": 250
    },
    {
      "epoch": 0.63,
      "learning_rate": 6.039202339608432e-06,
      "loss": 0.9788,
      "step": 255
    },
    {
      "epoch": 0.64,
      "learning_rate": 5.686139343187468e-06,
      "loss": 0.9929,
      "step": 260
    },
    {
      "epoch": 0.65,
      "learning_rate": 5.339564802974615e-06,
      "loss": 0.9834,
      "step": 265
    },
    {
      "epoch": 0.67,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.9866,
      "step": 270
    },
    {
      "epoch": 0.67,
      "eval_loss": 0.9899678230285645,
      "eval_runtime": 3893.4345,
      "eval_samples_per_second": 5.936,
      "eval_steps_per_second": 0.742,
      "step": 272
    },
    {
      "epoch": 0.67,
      "step": 272,
      "total_flos": 1.2275875027210994e+19,
      "train_loss": 1.0095771827241953,
      "train_runtime": 116282.0771,
      "train_samples_per_second": 1.788,
      "train_steps_per_second": 0.003
    }
  ],
  "logging_steps": 5,
  "max_steps": 405,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 1.2275875027210994e+19,
  "trial_name": null,
  "trial_params": null
}
|
|