{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7944915254237288,
  "eval_steps": 500,
  "global_step": 375,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00211864406779661,
      "grad_norm": 6772.94736433113,
      "learning_rate": 5.263157894736842e-06,
      "loss": 5.4426,
      "step": 1
    },
    {
      "epoch": 0.0211864406779661,
      "grad_norm": 43.76552598391156,
      "learning_rate": 5.263157894736842e-05,
      "loss": 6.5697,
      "step": 10
    },
    {
      "epoch": 0.0423728813559322,
      "grad_norm": 1.4214989301078385,
      "learning_rate": 0.00010526315789473683,
      "loss": 5.8015,
      "step": 20
    },
    {
      "epoch": 0.0635593220338983,
      "grad_norm": 1.8559172078320925,
      "learning_rate": 0.00015789473684210527,
      "loss": 4.6978,
      "step": 30
    },
    {
      "epoch": 0.0847457627118644,
      "grad_norm": 0.9523887604864526,
      "learning_rate": 0.00021052631578947367,
      "loss": 3.9616,
      "step": 40
    },
    {
      "epoch": 0.1059322033898305,
      "grad_norm": 0.5313592686196623,
      "learning_rate": 0.0002631578947368421,
      "loss": 3.2816,
      "step": 50
    },
    {
      "epoch": 0.1271186440677966,
      "grad_norm": 0.9882421975844679,
      "learning_rate": 0.00031578947368421053,
      "loss": 2.9453,
      "step": 60
    },
    {
      "epoch": 0.1483050847457627,
      "grad_norm": 1.3764980998221854,
      "learning_rate": 0.00036842105263157896,
      "loss": 2.7478,
      "step": 70
    },
    {
      "epoch": 0.1694915254237288,
      "grad_norm": 0.6188567319965684,
      "learning_rate": 0.00042105263157894734,
      "loss": 2.6778,
      "step": 80
    },
    {
      "epoch": 0.1906779661016949,
      "grad_norm": 0.45391127998861047,
      "learning_rate": 0.00047368421052631577,
      "loss": 2.6651,
      "step": 90
    },
    {
      "epoch": 0.211864406779661,
      "grad_norm": 0.3359651886818045,
      "learning_rate": 0.0004999904062938913,
      "loss": 2.6871,
      "step": 100
    },
    {
      "epoch": 0.2330508474576271,
      "grad_norm": 0.2538059916992196,
      "learning_rate": 0.0004999136610628463,
      "loss": 2.6047,
      "step": 110
    },
    {
      "epoch": 0.2542372881355932,
      "grad_norm": 0.7420239318499638,
      "learning_rate": 0.0004997601941609823,
      "loss": 2.6122,
      "step": 120
    },
    {
      "epoch": 0.2754237288135593,
      "grad_norm": 1.0130312150097984,
      "learning_rate": 0.0004995300527015189,
      "loss": 2.8314,
      "step": 130
    },
    {
      "epoch": 0.2966101694915254,
      "grad_norm": 0.45185267289061176,
      "learning_rate": 0.0004992233073362051,
      "loss": 2.7241,
      "step": 140
    },
    {
      "epoch": 0.3177966101694915,
      "grad_norm": 1.2647337895757225,
      "learning_rate": 0.0004988400522336304,
      "loss": 2.6269,
      "step": 150
    },
    {
      "epoch": 0.3389830508474576,
      "grad_norm": 1.8112639454685833,
      "learning_rate": 0.0004983804050503152,
      "loss": 2.5217,
      "step": 160
    },
    {
      "epoch": 0.3601694915254237,
      "grad_norm": 3.7153319991642295,
      "learning_rate": 0.0004978445068945918,
      "loss": 2.6363,
      "step": 170
    },
    {
      "epoch": 0.3813559322033898,
      "grad_norm": 1.791954534899048,
      "learning_rate": 0.0004972325222832848,
      "loss": 2.6156,
      "step": 180
    },
    {
      "epoch": 0.4025423728813559,
      "grad_norm": 0.30066185600399886,
      "learning_rate": 0.0004965446390912051,
      "loss": 2.5443,
      "step": 190
    },
    {
      "epoch": 0.423728813559322,
      "grad_norm": 0.155337614316216,
      "learning_rate": 0.0004957810684934746,
      "loss": 2.4791,
      "step": 200
    },
    {
      "epoch": 0.4449152542372881,
      "grad_norm": 0.10527493629733332,
      "learning_rate": 0.0004949420449006968,
      "loss": 2.4345,
      "step": 210
    },
    {
      "epoch": 0.4661016949152542,
      "grad_norm": 0.06207368975658736,
      "learning_rate": 0.0004940278258869937,
      "loss": 2.4284,
      "step": 220
    },
    {
      "epoch": 0.4872881355932203,
      "grad_norm": 0.06174468192832609,
      "learning_rate": 0.0004930386921109333,
      "loss": 2.3855,
      "step": 230
    },
    {
      "epoch": 0.5084745762711864,
      "grad_norm": 0.04921797570119065,
      "learning_rate": 0.0004919749472293693,
      "loss": 2.3648,
      "step": 240
    },
    {
      "epoch": 0.5296610169491526,
      "grad_norm": 0.06272467077360039,
      "learning_rate": 0.00049083691780422,
      "loss": 2.3618,
      "step": 250
    },
    {
      "epoch": 0.5508474576271186,
      "grad_norm": 0.06952127682493715,
      "learning_rate": 0.0004896249532022172,
      "loss": 2.3344,
      "step": 260
    },
    {
      "epoch": 0.5720338983050848,
      "grad_norm": 0.05291425745739597,
      "learning_rate": 0.0004883394254876522,
      "loss": 2.3401,
      "step": 270
    },
    {
      "epoch": 0.5932203389830508,
      "grad_norm": 0.09403878675057362,
      "learning_rate": 0.0004869807293081555,
      "loss": 2.3351,
      "step": 280
    },
    {
      "epoch": 0.614406779661017,
      "grad_norm": 0.07509078154330293,
      "learning_rate": 0.00048554928177354254,
      "loss": 2.3067,
      "step": 290
    },
    {
      "epoch": 0.635593220338983,
      "grad_norm": 0.1705698396439175,
      "learning_rate": 0.0004840455223277639,
      "loss": 2.3114,
      "step": 300
    },
    {
      "epoch": 0.6567796610169492,
      "grad_norm": 0.09664349408312269,
      "learning_rate": 0.0004824699126139995,
      "loss": 2.3059,
      "step": 310
    },
    {
      "epoch": 0.6779661016949152,
      "grad_norm": 0.08308196121963289,
      "learning_rate": 0.0004808229363329374,
      "loss": 2.2926,
      "step": 320
    },
    {
      "epoch": 0.6991525423728814,
      "grad_norm": 0.0669444065304248,
      "learning_rate": 0.0004791050990942811,
      "loss": 2.2862,
      "step": 330
    },
    {
      "epoch": 0.7203389830508474,
      "grad_norm": 0.05853715985139967,
      "learning_rate": 0.0004773169282615311,
      "loss": 2.2736,
      "step": 340
    },
    {
      "epoch": 0.7415254237288136,
      "grad_norm": 0.056477614254721926,
      "learning_rate": 0.00047545897279008845,
      "loss": 2.2715,
      "step": 350
    },
    {
      "epoch": 0.7627118644067796,
      "grad_norm": 0.0870456690332402,
      "learning_rate": 0.000473531803058729,
      "loss": 2.2543,
      "step": 360
    },
    {
      "epoch": 0.7838983050847458,
      "grad_norm": 0.0954901066459511,
      "learning_rate": 0.0004715360106945015,
      "loss": 2.2752,
      "step": 370
    }
  ],
  "logging_steps": 10,
  "max_steps": 1888,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9293357893287936.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}