{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.03819628647214854,
  "eval_steps": 9,
  "global_step": 54,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007073386383731211,
      "eval_loss": 1.189466118812561,
      "eval_runtime": 313.795,
      "eval_samples_per_second": 7.588,
      "eval_steps_per_second": 0.95,
      "step": 1
    },
    {
      "epoch": 0.002122015915119363,
      "grad_norm": 0.5069853067398071,
      "learning_rate": 3e-05,
      "loss": 1.2315,
      "step": 3
    },
    {
      "epoch": 0.004244031830238726,
      "grad_norm": 0.4807300567626953,
      "learning_rate": 6e-05,
      "loss": 1.1486,
      "step": 6
    },
    {
      "epoch": 0.00636604774535809,
      "grad_norm": 0.5452137589454651,
      "learning_rate": 9e-05,
      "loss": 1.1401,
      "step": 9
    },
    {
      "epoch": 0.00636604774535809,
      "eval_loss": 0.9386758804321289,
      "eval_runtime": 316.2435,
      "eval_samples_per_second": 7.529,
      "eval_steps_per_second": 0.942,
      "step": 9
    },
    {
      "epoch": 0.008488063660477453,
      "grad_norm": 0.46840620040893555,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.8462,
      "step": 12
    },
    {
      "epoch": 0.010610079575596816,
      "grad_norm": 0.7444979548454285,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.6378,
      "step": 15
    },
    {
      "epoch": 0.01273209549071618,
      "grad_norm": 0.4480806887149811,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.4438,
      "step": 18
    },
    {
      "epoch": 0.01273209549071618,
      "eval_loss": 0.3728099763393402,
      "eval_runtime": 316.5306,
      "eval_samples_per_second": 7.522,
      "eval_steps_per_second": 0.941,
      "step": 18
    },
    {
      "epoch": 0.014854111405835544,
      "grad_norm": 0.46977323293685913,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.3508,
      "step": 21
    },
    {
      "epoch": 0.016976127320954906,
      "grad_norm": 0.33589252829551697,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.3184,
      "step": 24
    },
    {
      "epoch": 0.01909814323607427,
      "grad_norm": 0.31081634759902954,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.3063,
      "step": 27
    },
    {
      "epoch": 0.01909814323607427,
      "eval_loss": 0.288199245929718,
      "eval_runtime": 316.9259,
      "eval_samples_per_second": 7.513,
      "eval_steps_per_second": 0.94,
      "step": 27
    },
    {
      "epoch": 0.021220159151193633,
      "grad_norm": 0.2740694582462311,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.2743,
      "step": 30
    },
    {
      "epoch": 0.023342175066312996,
      "grad_norm": 0.24397042393684387,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.2724,
      "step": 33
    },
    {
      "epoch": 0.02546419098143236,
      "grad_norm": 0.2719847857952118,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.2675,
      "step": 36
    },
    {
      "epoch": 0.02546419098143236,
      "eval_loss": 0.2569960951805115,
      "eval_runtime": 316.3134,
      "eval_samples_per_second": 7.527,
      "eval_steps_per_second": 0.942,
      "step": 36
    },
    {
      "epoch": 0.027586206896551724,
      "grad_norm": 0.22842727601528168,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.2581,
      "step": 39
    },
    {
      "epoch": 0.029708222811671087,
      "grad_norm": 0.22034530341625214,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.2418,
      "step": 42
    },
    {
      "epoch": 0.03183023872679045,
      "grad_norm": 0.27058666944503784,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.2442,
      "step": 45
    },
    {
      "epoch": 0.03183023872679045,
      "eval_loss": 0.23530101776123047,
      "eval_runtime": 316.3177,
      "eval_samples_per_second": 7.527,
      "eval_steps_per_second": 0.942,
      "step": 45
    },
    {
      "epoch": 0.03395225464190981,
      "grad_norm": 0.24335481226444244,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.2388,
      "step": 48
    },
    {
      "epoch": 0.03607427055702918,
      "grad_norm": 0.19779016077518463,
      "learning_rate": 5.695865504800327e-05,
      "loss": 0.2253,
      "step": 51
    },
    {
      "epoch": 0.03819628647214854,
      "grad_norm": 0.23507341742515564,
      "learning_rate": 5.174497483512506e-05,
      "loss": 0.2155,
      "step": 54
    },
    {
      "epoch": 0.03819628647214854,
      "eval_loss": 0.22180727124214172,
      "eval_runtime": 316.2483,
      "eval_samples_per_second": 7.529,
      "eval_steps_per_second": 0.942,
      "step": 54
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4074574120209613e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}