{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 0.3415137231349945,
"learning_rate": 6.666666666666667e-05,
"loss": 0.4337,
"step": 1
},
{
"epoch": 0.04,
"grad_norm": 0.33824849128723145,
"learning_rate": 0.00013333333333333334,
"loss": 0.4472,
"step": 2
},
{
"epoch": 0.06,
"grad_norm": 0.3517794609069824,
"learning_rate": 0.0002,
"loss": 0.4119,
"step": 3
},
{
"epoch": 0.08,
"grad_norm": 0.49278953671455383,
"learning_rate": 0.00019977668786231534,
"loss": 0.4045,
"step": 4
},
{
"epoch": 0.1,
"grad_norm": 0.5396780967712402,
"learning_rate": 0.000199107748815478,
"loss": 0.4354,
"step": 5
},
{
"epoch": 0.12,
"grad_norm": 0.5213820934295654,
"learning_rate": 0.0001979961705036587,
"loss": 0.4435,
"step": 6
},
{
"epoch": 0.14,
"grad_norm": 0.42753931879997253,
"learning_rate": 0.00019644691750543767,
"loss": 0.4306,
"step": 7
},
{
"epoch": 0.16,
"grad_norm": 0.46985888481140137,
"learning_rate": 0.0001944669091607919,
"loss": 0.4558,
"step": 8
},
{
"epoch": 0.18,
"grad_norm": 0.44640442728996277,
"learning_rate": 0.00019206498866764288,
"loss": 0.4777,
"step": 9
},
{
"epoch": 0.2,
"grad_norm": 0.4198346734046936,
"learning_rate": 0.00018925188358598813,
"loss": 0.4845,
"step": 10
},
{
"epoch": 0.22,
"grad_norm": 0.43529659509658813,
"learning_rate": 0.00018604015792601396,
"loss": 0.4327,
"step": 11
},
{
"epoch": 0.24,
"grad_norm": 0.4187154471874237,
"learning_rate": 0.00018244415603417603,
"loss": 0.4682,
"step": 12
},
{
"epoch": 0.26,
"grad_norm": 0.4378308057785034,
"learning_rate": 0.0001784799385278661,
"loss": 0.5107,
"step": 13
},
{
"epoch": 0.28,
"grad_norm": 0.40074899792671204,
"learning_rate": 0.00017416521056479577,
"loss": 0.4809,
"step": 14
},
{
"epoch": 0.3,
"grad_norm": 0.4163818955421448,
"learning_rate": 0.00016951924276746425,
"loss": 0.4956,
"step": 15
},
{
"epoch": 0.32,
"grad_norm": 0.39697593450546265,
"learning_rate": 0.00016456278515588024,
"loss": 0.4907,
"step": 16
},
{
"epoch": 0.34,
"grad_norm": 0.4083729088306427,
"learning_rate": 0.00015931797447293552,
"loss": 0.4838,
"step": 17
},
{
"epoch": 0.36,
"grad_norm": 0.38315674662590027,
"learning_rate": 0.00015380823531633729,
"loss": 0.4958,
"step": 18
},
{
"epoch": 0.38,
"grad_norm": 0.37201324105262756,
"learning_rate": 0.00014805817551866838,
"loss": 0.4897,
"step": 19
},
{
"epoch": 0.4,
"grad_norm": 0.37709081172943115,
"learning_rate": 0.0001420934762428335,
"loss": 0.4692,
"step": 20
},
{
"epoch": 0.42,
"grad_norm": 0.37321537733078003,
"learning_rate": 0.00013594077728375128,
"loss": 0.5103,
"step": 21
},
{
"epoch": 0.44,
"grad_norm": 0.38839632272720337,
"learning_rate": 0.00012962755808856342,
"loss": 0.5218,
"step": 22
},
{
"epoch": 0.46,
"grad_norm": 0.37921735644340515,
"learning_rate": 0.00012318201502675285,
"loss": 0.5324,
"step": 23
},
{
"epoch": 0.48,
"grad_norm": 0.4005667567253113,
"learning_rate": 0.00011663293545831302,
"loss": 0.5168,
"step": 24
},
{
"epoch": 0.5,
"grad_norm": 0.37409457564353943,
"learning_rate": 0.00011000956916240985,
"loss": 0.4907,
"step": 25
},
{
"epoch": 0.52,
"grad_norm": 0.3958514630794525,
"learning_rate": 0.00010334149770076747,
"loss": 0.5186,
"step": 26
},
{
"epoch": 0.54,
"grad_norm": 0.38589972257614136,
"learning_rate": 9.665850229923258e-05,
"loss": 0.5282,
"step": 27
},
{
"epoch": 0.56,
"grad_norm": 0.3830341100692749,
"learning_rate": 8.999043083759017e-05,
"loss": 0.4814,
"step": 28
},
{
"epoch": 0.58,
"grad_norm": 0.37159013748168945,
"learning_rate": 8.336706454168701e-05,
"loss": 0.5096,
"step": 29
},
{
"epoch": 0.6,
"grad_norm": 0.3702056109905243,
"learning_rate": 7.681798497324716e-05,
"loss": 0.4727,
"step": 30
},
{
"epoch": 0.62,
"grad_norm": 0.3684118688106537,
"learning_rate": 7.037244191143661e-05,
"loss": 0.4695,
"step": 31
},
{
"epoch": 0.64,
"grad_norm": 0.3787296712398529,
"learning_rate": 6.405922271624874e-05,
"loss": 0.5261,
"step": 32
},
{
"epoch": 0.66,
"grad_norm": 0.39863407611846924,
"learning_rate": 5.790652375716652e-05,
"loss": 0.5312,
"step": 33
},
{
"epoch": 0.68,
"grad_norm": 0.35908952355384827,
"learning_rate": 5.1941824481331626e-05,
"loss": 0.507,
"step": 34
},
{
"epoch": 0.7,
"grad_norm": 0.37030553817749023,
"learning_rate": 4.6191764683662744e-05,
"loss": 0.4799,
"step": 35
},
{
"epoch": 0.72,
"grad_norm": 0.3842332661151886,
"learning_rate": 4.0682025527064486e-05,
"loss": 0.5104,
"step": 36
},
{
"epoch": 0.74,
"grad_norm": 0.3656119108200073,
"learning_rate": 3.543721484411976e-05,
"loss": 0.4926,
"step": 37
},
{
"epoch": 0.76,
"grad_norm": 0.3526756763458252,
"learning_rate": 3.0480757232535772e-05,
"loss": 0.4962,
"step": 38
},
{
"epoch": 0.78,
"grad_norm": 0.3747156858444214,
"learning_rate": 2.5834789435204243e-05,
"loss": 0.5222,
"step": 39
},
{
"epoch": 0.8,
"grad_norm": 0.37097883224487305,
"learning_rate": 2.1520061472133902e-05,
"loss": 0.5288,
"step": 40
},
{
"epoch": 0.82,
"grad_norm": 0.37520304322242737,
"learning_rate": 1.7555843965823992e-05,
"loss": 0.5005,
"step": 41
},
{
"epoch": 0.84,
"grad_norm": 0.34392106533050537,
"learning_rate": 1.3959842073986085e-05,
"loss": 0.4774,
"step": 42
},
{
"epoch": 0.86,
"grad_norm": 0.3575632572174072,
"learning_rate": 1.0748116414011888e-05,
"loss": 0.5033,
"step": 43
},
{
"epoch": 0.88,
"grad_norm": 0.35089874267578125,
"learning_rate": 7.935011332357112e-06,
"loss": 0.5018,
"step": 44
},
{
"epoch": 0.9,
"grad_norm": 0.3535928428173065,
"learning_rate": 5.533090839208133e-06,
"loss": 0.5141,
"step": 45
},
{
"epoch": 0.92,
"grad_norm": 0.37137070298194885,
"learning_rate": 3.5530824945623542e-06,
"loss": 0.5101,
"step": 46
},
{
"epoch": 0.94,
"grad_norm": 0.38077953457832336,
"learning_rate": 2.003829496341325e-06,
"loss": 0.5513,
"step": 47
},
{
"epoch": 0.96,
"grad_norm": 0.3571460545063019,
"learning_rate": 8.922511845219971e-07,
"loss": 0.5074,
"step": 48
},
{
"epoch": 0.98,
"grad_norm": 0.34851184487342834,
"learning_rate": 2.2331213768468363e-07,
"loss": 0.4974,
"step": 49
},
{
"epoch": 1.0,
"grad_norm": 0.3630204200744629,
"learning_rate": 0.0,
"loss": 0.52,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.058530297072845e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}