{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3177966101694915,
  "eval_steps": 500,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00211864406779661,
      "grad_norm": 6772.94736433113,
      "learning_rate": 5.263157894736842e-06,
      "loss": 5.4426,
      "step": 1
    },
    {
      "epoch": 0.0211864406779661,
      "grad_norm": 43.76552598391156,
      "learning_rate": 5.263157894736842e-05,
      "loss": 6.5697,
      "step": 10
    },
    {
      "epoch": 0.0423728813559322,
      "grad_norm": 1.4214989301078385,
      "learning_rate": 0.00010526315789473683,
      "loss": 5.8015,
      "step": 20
    },
    {
      "epoch": 0.0635593220338983,
      "grad_norm": 1.8559172078320925,
      "learning_rate": 0.00015789473684210527,
      "loss": 4.6978,
      "step": 30
    },
    {
      "epoch": 0.0847457627118644,
      "grad_norm": 0.9523887604864526,
      "learning_rate": 0.00021052631578947367,
      "loss": 3.9616,
      "step": 40
    },
    {
      "epoch": 0.1059322033898305,
      "grad_norm": 0.5313592686196623,
      "learning_rate": 0.0002631578947368421,
      "loss": 3.2816,
      "step": 50
    },
    {
      "epoch": 0.1271186440677966,
      "grad_norm": 0.9882421975844679,
      "learning_rate": 0.00031578947368421053,
      "loss": 2.9453,
      "step": 60
    },
    {
      "epoch": 0.1483050847457627,
      "grad_norm": 1.3764980998221854,
      "learning_rate": 0.00036842105263157896,
      "loss": 2.7478,
      "step": 70
    },
    {
      "epoch": 0.1694915254237288,
      "grad_norm": 0.6188567319965684,
      "learning_rate": 0.00042105263157894734,
      "loss": 2.6778,
      "step": 80
    },
    {
      "epoch": 0.1906779661016949,
      "grad_norm": 0.45391127998861047,
      "learning_rate": 0.00047368421052631577,
      "loss": 2.6651,
      "step": 90
    },
    {
      "epoch": 0.211864406779661,
      "grad_norm": 0.3359651886818045,
      "learning_rate": 0.0004999904062938913,
      "loss": 2.6871,
      "step": 100
    },
    {
      "epoch": 0.2330508474576271,
      "grad_norm": 0.2538059916992196,
      "learning_rate": 0.0004999136610628463,
      "loss": 2.6047,
      "step": 110
    },
    {
      "epoch": 0.2542372881355932,
      "grad_norm": 0.7420239318499638,
      "learning_rate": 0.0004997601941609823,
      "loss": 2.6122,
      "step": 120
    },
    {
      "epoch": 0.2754237288135593,
      "grad_norm": 1.0130312150097984,
      "learning_rate": 0.0004995300527015189,
      "loss": 2.8314,
      "step": 130
    },
    {
      "epoch": 0.2966101694915254,
      "grad_norm": 0.45185267289061176,
      "learning_rate": 0.0004992233073362051,
      "loss": 2.7241,
      "step": 140
    },
    {
      "epoch": 0.3177966101694915,
      "grad_norm": 1.2647337895757225,
      "learning_rate": 0.0004988400522336304,
      "loss": 2.6269,
      "step": 150
    }
  ],
  "logging_steps": 10,
  "max_steps": 1888,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3715483865972736.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}