{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.007707129094412331,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00015414258188824664,
      "grad_norm": 0.6734272241592407,
      "learning_rate": 5e-05,
      "loss": 1.4781,
      "step": 1
    },
    {
      "epoch": 0.00015414258188824664,
      "eval_loss": 1.66375732421875,
      "eval_runtime": 3.2935,
      "eval_samples_per_second": 15.182,
      "eval_steps_per_second": 3.947,
      "step": 1
    },
    {
      "epoch": 0.0003082851637764933,
      "grad_norm": 0.8541533350944519,
      "learning_rate": 0.0001,
      "loss": 1.3495,
      "step": 2
    },
    {
      "epoch": 0.00046242774566473987,
      "grad_norm": 0.6524771451950073,
      "learning_rate": 9.990365154573717e-05,
      "loss": 1.315,
      "step": 3
    },
    {
      "epoch": 0.0006165703275529866,
      "grad_norm": 0.5823439955711365,
      "learning_rate": 9.961501876182148e-05,
      "loss": 1.3871,
      "step": 4
    },
    {
      "epoch": 0.0007707129094412332,
      "grad_norm": 0.49418699741363525,
      "learning_rate": 9.913533761814537e-05,
      "loss": 1.3642,
      "step": 5
    },
    {
      "epoch": 0.0009248554913294797,
      "grad_norm": 0.5664134621620178,
      "learning_rate": 9.846666218300807e-05,
      "loss": 1.3952,
      "step": 6
    },
    {
      "epoch": 0.0010789980732177264,
      "grad_norm": 0.4901440739631653,
      "learning_rate": 9.761185582727977e-05,
      "loss": 1.5234,
      "step": 7
    },
    {
      "epoch": 0.0012331406551059731,
      "grad_norm": 0.3769587576389313,
      "learning_rate": 9.657457896300791e-05,
      "loss": 1.4229,
      "step": 8
    },
    {
      "epoch": 0.0013872832369942196,
      "grad_norm": 0.4188389480113983,
      "learning_rate": 9.535927336897098e-05,
      "loss": 1.5131,
      "step": 9
    },
    {
      "epoch": 0.0015414258188824663,
      "grad_norm": 0.5015379190444946,
      "learning_rate": 9.397114317029975e-05,
      "loss": 1.5212,
      "step": 10
    },
    {
      "epoch": 0.001695568400770713,
      "grad_norm": 0.5772421956062317,
      "learning_rate": 9.241613255361455e-05,
      "loss": 1.572,
      "step": 11
    },
    {
      "epoch": 0.0018497109826589595,
      "grad_norm": 0.6743692755699158,
      "learning_rate": 9.070090031310558e-05,
      "loss": 1.5605,
      "step": 12
    },
    {
      "epoch": 0.002003853564547206,
      "grad_norm": 0.2970338761806488,
      "learning_rate": 8.883279133655399e-05,
      "loss": 1.4014,
      "step": 13
    },
    {
      "epoch": 0.002157996146435453,
      "grad_norm": 0.2780226469039917,
      "learning_rate": 8.681980515339464e-05,
      "loss": 1.155,
      "step": 14
    },
    {
      "epoch": 0.0023121387283236996,
      "grad_norm": 0.2958413362503052,
      "learning_rate": 8.467056167950311e-05,
      "loss": 1.273,
      "step": 15
    },
    {
      "epoch": 0.0024662813102119463,
      "grad_norm": 0.3193422853946686,
      "learning_rate": 8.239426430539243e-05,
      "loss": 1.2629,
      "step": 16
    },
    {
      "epoch": 0.0026204238921001925,
      "grad_norm": 0.339270681142807,
      "learning_rate": 8.000066048588211e-05,
      "loss": 1.3673,
      "step": 17
    },
    {
      "epoch": 0.0027745664739884392,
      "grad_norm": 0.2887856066226959,
      "learning_rate": 7.75e-05,
      "loss": 1.367,
      "step": 18
    },
    {
      "epoch": 0.002928709055876686,
      "grad_norm": 0.3095043897628784,
      "learning_rate": 7.490299105985507e-05,
      "loss": 1.3631,
      "step": 19
    },
    {
      "epoch": 0.0030828516377649326,
      "grad_norm": 0.32946938276290894,
      "learning_rate": 7.222075445642904e-05,
      "loss": 1.4236,
      "step": 20
    },
    {
      "epoch": 0.0032369942196531793,
      "grad_norm": 0.33055737614631653,
      "learning_rate": 6.946477593864228e-05,
      "loss": 1.4734,
      "step": 21
    },
    {
      "epoch": 0.003391136801541426,
      "grad_norm": 0.3694823384284973,
      "learning_rate": 6.664685702961344e-05,
      "loss": 1.4547,
      "step": 22
    },
    {
      "epoch": 0.0035452793834296723,
      "grad_norm": 0.4111591875553131,
      "learning_rate": 6.377906449072578e-05,
      "loss": 1.5789,
      "step": 23
    },
    {
      "epoch": 0.003699421965317919,
      "grad_norm": 0.48207613825798035,
      "learning_rate": 6.087367864990233e-05,
      "loss": 1.5792,
      "step": 24
    },
    {
      "epoch": 0.0038535645472061657,
      "grad_norm": 0.7138695120811462,
      "learning_rate": 5.794314081535644e-05,
      "loss": 1.3662,
      "step": 25
    },
    {
      "epoch": 0.0038535645472061657,
      "eval_loss": 1.3446707725524902,
      "eval_runtime": 3.2927,
      "eval_samples_per_second": 15.185,
      "eval_steps_per_second": 3.948,
      "step": 25
    },
    {
      "epoch": 0.004007707129094412,
      "grad_norm": 0.2780517339706421,
      "learning_rate": 5.500000000000001e-05,
      "loss": 1.1835,
      "step": 26
    },
    {
      "epoch": 0.004161849710982659,
      "grad_norm": 0.2748948931694031,
      "learning_rate": 5.205685918464356e-05,
      "loss": 1.1004,
      "step": 27
    },
    {
      "epoch": 0.004315992292870906,
      "grad_norm": 0.3136284351348877,
      "learning_rate": 4.912632135009769e-05,
      "loss": 1.2712,
      "step": 28
    },
    {
      "epoch": 0.004470134874759152,
      "grad_norm": 0.2965591847896576,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 1.2592,
      "step": 29
    },
    {
      "epoch": 0.004624277456647399,
      "grad_norm": 0.3082920014858246,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 1.2643,
      "step": 30
    },
    {
      "epoch": 0.004778420038535645,
      "grad_norm": 0.3298587501049042,
      "learning_rate": 4.053522406135775e-05,
      "loss": 1.4209,
      "step": 31
    },
    {
      "epoch": 0.0049325626204238925,
      "grad_norm": 0.30679869651794434,
      "learning_rate": 3.777924554357096e-05,
      "loss": 1.3488,
      "step": 32
    },
    {
      "epoch": 0.005086705202312139,
      "grad_norm": 0.3360414505004883,
      "learning_rate": 3.509700894014496e-05,
      "loss": 1.4167,
      "step": 33
    },
    {
      "epoch": 0.005240847784200385,
      "grad_norm": 0.34086665511131287,
      "learning_rate": 3.250000000000001e-05,
      "loss": 1.5297,
      "step": 34
    },
    {
      "epoch": 0.005394990366088632,
      "grad_norm": 0.39216431975364685,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 1.5874,
      "step": 35
    },
    {
      "epoch": 0.0055491329479768784,
      "grad_norm": 0.4349631667137146,
      "learning_rate": 2.760573569460757e-05,
      "loss": 1.6172,
      "step": 36
    },
    {
      "epoch": 0.005703275529865126,
      "grad_norm": 0.5350357294082642,
      "learning_rate": 2.53294383204969e-05,
      "loss": 1.496,
      "step": 37
    },
    {
      "epoch": 0.005857418111753372,
      "grad_norm": 0.2876313328742981,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 1.209,
      "step": 38
    },
    {
      "epoch": 0.006011560693641618,
      "grad_norm": 0.2404983937740326,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 1.2665,
      "step": 39
    },
    {
      "epoch": 0.006165703275529865,
      "grad_norm": 0.266658216714859,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 1.357,
      "step": 40
    },
    {
      "epoch": 0.0063198458574181115,
      "grad_norm": 0.29665255546569824,
      "learning_rate": 1.758386744638546e-05,
      "loss": 1.2722,
      "step": 41
    },
    {
      "epoch": 0.006473988439306359,
      "grad_norm": 0.2774446904659271,
      "learning_rate": 1.602885682970026e-05,
      "loss": 1.2849,
      "step": 42
    },
    {
      "epoch": 0.006628131021194605,
      "grad_norm": 0.2746015191078186,
      "learning_rate": 1.464072663102903e-05,
      "loss": 1.2507,
      "step": 43
    },
    {
      "epoch": 0.006782273603082852,
      "grad_norm": 0.29231736063957214,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 1.2986,
      "step": 44
    },
    {
      "epoch": 0.006936416184971098,
      "grad_norm": 0.31806090474128723,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 1.4726,
      "step": 45
    },
    {
      "epoch": 0.0070905587668593445,
      "grad_norm": 0.3273776173591614,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 1.4034,
      "step": 46
    },
    {
      "epoch": 0.007244701348747592,
      "grad_norm": 0.3434179127216339,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 1.4006,
      "step": 47
    },
    {
      "epoch": 0.007398843930635838,
      "grad_norm": 0.38592585921287537,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 1.487,
      "step": 48
    },
    {
      "epoch": 0.007552986512524085,
      "grad_norm": 0.4318895936012268,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 1.4787,
      "step": 49
    },
    {
      "epoch": 0.007707129094412331,
      "grad_norm": 0.569764256477356,
      "learning_rate": 1e-05,
      "loss": 1.4057,
      "step": 50
    },
    {
      "epoch": 0.007707129094412331,
      "eval_loss": 1.3222453594207764,
      "eval_runtime": 3.3045,
      "eval_samples_per_second": 15.131,
      "eval_steps_per_second": 3.934,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.971813667564421e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}