{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5073170731707317,
  "eval_steps": 500,
  "global_step": 260,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01951219512195122,
      "grad_norm": 0.14488260447978973,
      "learning_rate": 0.00019934683213585893,
      "loss": 0.6356,
      "step": 10
    },
    {
      "epoch": 0.03902439024390244,
      "grad_norm": 0.1634250283241272,
      "learning_rate": 0.00019804049640757677,
      "loss": 0.4368,
      "step": 20
    },
    {
      "epoch": 0.05853658536585366,
      "grad_norm": 0.1504327356815338,
      "learning_rate": 0.0001967341606792946,
      "loss": 0.407,
      "step": 30
    },
    {
      "epoch": 0.07804878048780488,
      "grad_norm": 0.16282524168491364,
      "learning_rate": 0.00019542782495101242,
      "loss": 0.3788,
      "step": 40
    },
    {
      "epoch": 0.0975609756097561,
      "grad_norm": 0.17505913972854614,
      "learning_rate": 0.00019412148922273026,
      "loss": 0.3705,
      "step": 50
    },
    {
      "epoch": 0.11707317073170732,
      "grad_norm": 0.18271876871585846,
      "learning_rate": 0.00019281515349444807,
      "loss": 0.3654,
      "step": 60
    },
    {
      "epoch": 0.13658536585365855,
      "grad_norm": 0.1547907590866089,
      "learning_rate": 0.0001915088177661659,
      "loss": 0.3845,
      "step": 70
    },
    {
      "epoch": 0.15609756097560976,
      "grad_norm": 0.1806977242231369,
      "learning_rate": 0.00019020248203788375,
      "loss": 0.362,
      "step": 80
    },
    {
      "epoch": 0.17560975609756097,
      "grad_norm": 0.16502083837985992,
      "learning_rate": 0.00018889614630960156,
      "loss": 0.3436,
      "step": 90
    },
    {
      "epoch": 0.1951219512195122,
      "grad_norm": 0.15947739779949188,
      "learning_rate": 0.0001875898105813194,
      "loss": 0.3221,
      "step": 100
    },
    {
      "epoch": 0.2146341463414634,
      "grad_norm": 0.16028478741645813,
      "learning_rate": 0.00018628347485303724,
      "loss": 0.3291,
      "step": 110
    },
    {
      "epoch": 0.23414634146341465,
      "grad_norm": 0.16755621135234833,
      "learning_rate": 0.00018497713912475508,
      "loss": 0.3454,
      "step": 120
    },
    {
      "epoch": 0.25365853658536586,
      "grad_norm": 0.17687192559242249,
      "learning_rate": 0.00018367080339647292,
      "loss": 0.3265,
      "step": 130
    },
    {
      "epoch": 0.2731707317073171,
      "grad_norm": 0.12853658199310303,
      "learning_rate": 0.00018236446766819073,
      "loss": 0.3136,
      "step": 140
    },
    {
      "epoch": 0.2926829268292683,
      "grad_norm": 0.15641653537750244,
      "learning_rate": 0.00018105813193990857,
      "loss": 0.3124,
      "step": 150
    },
    {
      "epoch": 0.3121951219512195,
      "grad_norm": 0.1840222179889679,
      "learning_rate": 0.00017975179621162638,
      "loss": 0.3308,
      "step": 160
    },
    {
      "epoch": 0.33170731707317075,
      "grad_norm": 0.16983111202716827,
      "learning_rate": 0.00017844546048334422,
      "loss": 0.3116,
      "step": 170
    },
    {
      "epoch": 0.35121951219512193,
      "grad_norm": 0.18679502606391907,
      "learning_rate": 0.00017713912475506206,
      "loss": 0.3458,
      "step": 180
    },
    {
      "epoch": 0.37073170731707317,
      "grad_norm": 0.1655397266149521,
      "learning_rate": 0.0001758327890267799,
      "loss": 0.3041,
      "step": 190
    },
    {
      "epoch": 0.3902439024390244,
      "grad_norm": 0.16899655759334564,
      "learning_rate": 0.00017452645329849774,
      "loss": 0.3179,
      "step": 200
    },
    {
      "epoch": 0.4097560975609756,
      "grad_norm": 0.14320065081119537,
      "learning_rate": 0.00017322011757021555,
      "loss": 0.2896,
      "step": 210
    },
    {
      "epoch": 0.4292682926829268,
      "grad_norm": 0.18079160153865814,
      "learning_rate": 0.0001719137818419334,
      "loss": 0.3121,
      "step": 220
    },
    {
      "epoch": 0.44878048780487806,
      "grad_norm": 0.15252567827701569,
      "learning_rate": 0.00017060744611365123,
      "loss": 0.3054,
      "step": 230
    },
    {
      "epoch": 0.4682926829268293,
      "grad_norm": 0.1688212901353836,
      "learning_rate": 0.00016930111038536904,
      "loss": 0.316,
      "step": 240
    },
    {
      "epoch": 0.4878048780487805,
      "grad_norm": 0.1604667603969574,
      "learning_rate": 0.00016799477465708688,
      "loss": 0.3174,
      "step": 250
    },
    {
      "epoch": 0.5073170731707317,
      "grad_norm": 0.19493722915649414,
      "learning_rate": 0.0001666884389288047,
      "loss": 0.307,
      "step": 260
    }
  ],
  "logging_steps": 10,
  "max_steps": 1536,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7416088186290176e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}