{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 45500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.054945054945054944,
"grad_norm": 1.780437707901001,
"learning_rate": 0.0001978021978021978,
"loss": 1.5906,
"step": 500
},
{
"epoch": 0.10989010989010989,
"grad_norm": 2.4972989559173584,
"learning_rate": 0.00019560439560439562,
"loss": 1.5616,
"step": 1000
},
{
"epoch": 0.16483516483516483,
"grad_norm": 1.6102287769317627,
"learning_rate": 0.00019340659340659342,
"loss": 1.5549,
"step": 1500
},
{
"epoch": 0.21978021978021978,
"grad_norm": 1.6323072910308838,
"learning_rate": 0.00019120879120879122,
"loss": 1.5446,
"step": 2000
},
{
"epoch": 0.27472527472527475,
"grad_norm": 1.6228586435317993,
"learning_rate": 0.00018901098901098903,
"loss": 1.5549,
"step": 2500
},
{
"epoch": 0.32967032967032966,
"grad_norm": 1.433160662651062,
"learning_rate": 0.00018681318681318683,
"loss": 1.5521,
"step": 3000
},
{
"epoch": 0.38461538461538464,
"grad_norm": 1.4724050760269165,
"learning_rate": 0.00018461538461538463,
"loss": 1.5335,
"step": 3500
},
{
"epoch": 0.43956043956043955,
"grad_norm": 1.2267966270446777,
"learning_rate": 0.0001824175824175824,
"loss": 1.5359,
"step": 4000
},
{
"epoch": 0.4945054945054945,
"grad_norm": 1.3739681243896484,
"learning_rate": 0.00018021978021978024,
"loss": 1.5362,
"step": 4500
},
{
"epoch": 0.5494505494505495,
"grad_norm": 1.3636854887008667,
"learning_rate": 0.00017802197802197802,
"loss": 1.5257,
"step": 5000
},
{
"epoch": 0.6043956043956044,
"grad_norm": 1.9858155250549316,
"learning_rate": 0.00017582417582417582,
"loss": 1.5378,
"step": 5500
},
{
"epoch": 0.6593406593406593,
"grad_norm": 1.6531416177749634,
"learning_rate": 0.00017362637362637365,
"loss": 1.5239,
"step": 6000
},
{
"epoch": 0.7142857142857143,
"grad_norm": 1.638098120689392,
"learning_rate": 0.00017142857142857143,
"loss": 1.5331,
"step": 6500
},
{
"epoch": 0.7692307692307693,
"grad_norm": 1.8147164583206177,
"learning_rate": 0.00016923076923076923,
"loss": 1.5291,
"step": 7000
},
{
"epoch": 0.8241758241758241,
"grad_norm": 1.4330575466156006,
"learning_rate": 0.00016703296703296706,
"loss": 1.5328,
"step": 7500
},
{
"epoch": 0.8791208791208791,
"grad_norm": 1.3865350484848022,
"learning_rate": 0.00016483516483516484,
"loss": 1.5266,
"step": 8000
},
{
"epoch": 0.9340659340659341,
"grad_norm": 1.8921843767166138,
"learning_rate": 0.00016263736263736264,
"loss": 1.5323,
"step": 8500
},
{
"epoch": 0.989010989010989,
"grad_norm": 2.120455026626587,
"learning_rate": 0.00016043956043956044,
"loss": 1.5254,
"step": 9000
},
{
"epoch": 1.043956043956044,
"grad_norm": 1.6023536920547485,
"learning_rate": 0.00015824175824175824,
"loss": 1.4525,
"step": 9500
},
{
"epoch": 1.098901098901099,
"grad_norm": 1.4707773923873901,
"learning_rate": 0.00015604395604395605,
"loss": 1.4538,
"step": 10000
},
{
"epoch": 1.1538461538461537,
"grad_norm": 1.7984991073608398,
"learning_rate": 0.00015384615384615385,
"loss": 1.4421,
"step": 10500
},
{
"epoch": 1.2087912087912087,
"grad_norm": 1.70187509059906,
"learning_rate": 0.00015164835164835165,
"loss": 1.4454,
"step": 11000
},
{
"epoch": 1.2637362637362637,
"grad_norm": 2.5541532039642334,
"learning_rate": 0.00014945054945054946,
"loss": 1.4512,
"step": 11500
},
{
"epoch": 1.3186813186813187,
"grad_norm": 1.5920054912567139,
"learning_rate": 0.00014725274725274726,
"loss": 1.4557,
"step": 12000
},
{
"epoch": 1.3736263736263736,
"grad_norm": 2.0435898303985596,
"learning_rate": 0.00014505494505494506,
"loss": 1.4496,
"step": 12500
},
{
"epoch": 1.4285714285714286,
"grad_norm": 2.3308217525482178,
"learning_rate": 0.00014285714285714287,
"loss": 1.437,
"step": 13000
},
{
"epoch": 1.4835164835164836,
"grad_norm": 2.7433712482452393,
"learning_rate": 0.00014065934065934067,
"loss": 1.437,
"step": 13500
},
{
"epoch": 1.5384615384615383,
"grad_norm": 2.6499412059783936,
"learning_rate": 0.00013846153846153847,
"loss": 1.4419,
"step": 14000
},
{
"epoch": 1.5934065934065935,
"grad_norm": 2.6177027225494385,
"learning_rate": 0.00013626373626373628,
"loss": 1.4408,
"step": 14500
},
{
"epoch": 1.6483516483516483,
"grad_norm": 1.9484443664550781,
"learning_rate": 0.00013406593406593405,
"loss": 1.4617,
"step": 15000
},
{
"epoch": 1.7032967032967035,
"grad_norm": 2.649055242538452,
"learning_rate": 0.00013186813186813188,
"loss": 1.4482,
"step": 15500
},
{
"epoch": 1.7582417582417582,
"grad_norm": 1.9473294019699097,
"learning_rate": 0.0001296703296703297,
"loss": 1.4358,
"step": 16000
},
{
"epoch": 1.8131868131868132,
"grad_norm": 2.2678744792938232,
"learning_rate": 0.00012747252747252746,
"loss": 1.4333,
"step": 16500
},
{
"epoch": 1.8681318681318682,
"grad_norm": 1.629854440689087,
"learning_rate": 0.00012527472527472527,
"loss": 1.4305,
"step": 17000
},
{
"epoch": 1.9230769230769231,
"grad_norm": 2.318514823913574,
"learning_rate": 0.0001230769230769231,
"loss": 1.4292,
"step": 17500
},
{
"epoch": 1.978021978021978,
"grad_norm": 2.1276426315307617,
"learning_rate": 0.00012087912087912087,
"loss": 1.435,
"step": 18000
},
{
"epoch": 2.032967032967033,
"grad_norm": 2.3333852291107178,
"learning_rate": 0.00011868131868131869,
"loss": 1.3761,
"step": 18500
},
{
"epoch": 2.087912087912088,
"grad_norm": 3.305957078933716,
"learning_rate": 0.0001164835164835165,
"loss": 1.3318,
"step": 19000
},
{
"epoch": 2.142857142857143,
"grad_norm": 5.382218837738037,
"learning_rate": 0.00011428571428571428,
"loss": 1.3463,
"step": 19500
},
{
"epoch": 2.197802197802198,
"grad_norm": 3.35638427734375,
"learning_rate": 0.0001120879120879121,
"loss": 1.3418,
"step": 20000
},
{
"epoch": 2.2527472527472527,
"grad_norm": 3.04825496673584,
"learning_rate": 0.0001098901098901099,
"loss": 1.3568,
"step": 20500
},
{
"epoch": 2.3076923076923075,
"grad_norm": 2.9136993885040283,
"learning_rate": 0.0001076923076923077,
"loss": 1.3507,
"step": 21000
},
{
"epoch": 2.3626373626373627,
"grad_norm": 2.5451395511627197,
"learning_rate": 0.0001054945054945055,
"loss": 1.353,
"step": 21500
},
{
"epoch": 2.4175824175824174,
"grad_norm": 2.779684066772461,
"learning_rate": 0.00010329670329670331,
"loss": 1.3456,
"step": 22000
},
{
"epoch": 2.4725274725274726,
"grad_norm": 3.8211276531219482,
"learning_rate": 0.0001010989010989011,
"loss": 1.3519,
"step": 22500
},
{
"epoch": 2.5274725274725274,
"grad_norm": 2.877223253250122,
"learning_rate": 9.89010989010989e-05,
"loss": 1.3506,
"step": 23000
},
{
"epoch": 2.5824175824175826,
"grad_norm": 2.53200101852417,
"learning_rate": 9.670329670329671e-05,
"loss": 1.3564,
"step": 23500
},
{
"epoch": 2.6373626373626373,
"grad_norm": 3.249028205871582,
"learning_rate": 9.450549450549451e-05,
"loss": 1.3393,
"step": 24000
},
{
"epoch": 2.6923076923076925,
"grad_norm": 2.819939613342285,
"learning_rate": 9.230769230769232e-05,
"loss": 1.3413,
"step": 24500
},
{
"epoch": 2.7472527472527473,
"grad_norm": 3.4391469955444336,
"learning_rate": 9.010989010989012e-05,
"loss": 1.3517,
"step": 25000
},
{
"epoch": 2.802197802197802,
"grad_norm": 2.9319329261779785,
"learning_rate": 8.791208791208791e-05,
"loss": 1.3362,
"step": 25500
},
{
"epoch": 2.857142857142857,
"grad_norm": 2.257335901260376,
"learning_rate": 8.571428571428571e-05,
"loss": 1.3402,
"step": 26000
},
{
"epoch": 2.912087912087912,
"grad_norm": 2.6121294498443604,
"learning_rate": 8.351648351648353e-05,
"loss": 1.3615,
"step": 26500
},
{
"epoch": 2.967032967032967,
"grad_norm": 2.716127395629883,
"learning_rate": 8.131868131868132e-05,
"loss": 1.3418,
"step": 27000
},
{
"epoch": 3.021978021978022,
"grad_norm": 2.227154493331909,
"learning_rate": 7.912087912087912e-05,
"loss": 1.3199,
"step": 27500
},
{
"epoch": 3.076923076923077,
"grad_norm": 3.0606658458709717,
"learning_rate": 7.692307692307693e-05,
"loss": 1.2553,
"step": 28000
},
{
"epoch": 3.131868131868132,
"grad_norm": 2.8261947631835938,
"learning_rate": 7.472527472527473e-05,
"loss": 1.2688,
"step": 28500
},
{
"epoch": 3.186813186813187,
"grad_norm": 3.738393783569336,
"learning_rate": 7.252747252747253e-05,
"loss": 1.2513,
"step": 29000
},
{
"epoch": 3.241758241758242,
"grad_norm": 2.5947108268737793,
"learning_rate": 7.032967032967034e-05,
"loss": 1.2757,
"step": 29500
},
{
"epoch": 3.2967032967032965,
"grad_norm": 2.707387924194336,
"learning_rate": 6.813186813186814e-05,
"loss": 1.274,
"step": 30000
},
{
"epoch": 3.3516483516483517,
"grad_norm": 3.4624383449554443,
"learning_rate": 6.593406593406594e-05,
"loss": 1.2702,
"step": 30500
},
{
"epoch": 3.4065934065934065,
"grad_norm": 3.7795355319976807,
"learning_rate": 6.373626373626373e-05,
"loss": 1.2409,
"step": 31000
},
{
"epoch": 3.4615384615384617,
"grad_norm": 3.521811008453369,
"learning_rate": 6.153846153846155e-05,
"loss": 1.267,
"step": 31500
},
{
"epoch": 3.5164835164835164,
"grad_norm": 2.7402503490448,
"learning_rate": 5.9340659340659345e-05,
"loss": 1.2641,
"step": 32000
},
{
"epoch": 3.571428571428571,
"grad_norm": 2.937671661376953,
"learning_rate": 5.714285714285714e-05,
"loss": 1.2626,
"step": 32500
},
{
"epoch": 3.6263736263736264,
"grad_norm": 2.886845350265503,
"learning_rate": 5.494505494505495e-05,
"loss": 1.269,
"step": 33000
},
{
"epoch": 3.6813186813186816,
"grad_norm": 3.9152579307556152,
"learning_rate": 5.274725274725275e-05,
"loss": 1.2659,
"step": 33500
},
{
"epoch": 3.7362637362637363,
"grad_norm": 2.7805376052856445,
"learning_rate": 5.054945054945055e-05,
"loss": 1.2523,
"step": 34000
},
{
"epoch": 3.791208791208791,
"grad_norm": 3.7305562496185303,
"learning_rate": 4.8351648351648355e-05,
"loss": 1.2528,
"step": 34500
},
{
"epoch": 3.8461538461538463,
"grad_norm": 3.0859031677246094,
"learning_rate": 4.615384615384616e-05,
"loss": 1.273,
"step": 35000
},
{
"epoch": 3.901098901098901,
"grad_norm": 4.31486177444458,
"learning_rate": 4.3956043956043955e-05,
"loss": 1.2739,
"step": 35500
},
{
"epoch": 3.956043956043956,
"grad_norm": 2.46838116645813,
"learning_rate": 4.1758241758241765e-05,
"loss": 1.2555,
"step": 36000
},
{
"epoch": 4.010989010989011,
"grad_norm": 3.3113248348236084,
"learning_rate": 3.956043956043956e-05,
"loss": 1.2388,
"step": 36500
},
{
"epoch": 4.065934065934066,
"grad_norm": 5.830134868621826,
"learning_rate": 3.7362637362637365e-05,
"loss": 1.1952,
"step": 37000
},
{
"epoch": 4.1208791208791204,
"grad_norm": 2.97209095954895,
"learning_rate": 3.516483516483517e-05,
"loss": 1.1902,
"step": 37500
},
{
"epoch": 4.175824175824176,
"grad_norm": 3.9511055946350098,
"learning_rate": 3.296703296703297e-05,
"loss": 1.1837,
"step": 38000
},
{
"epoch": 4.230769230769231,
"grad_norm": 6.446171760559082,
"learning_rate": 3.0769230769230774e-05,
"loss": 1.1888,
"step": 38500
},
{
"epoch": 4.285714285714286,
"grad_norm": 4.0232744216918945,
"learning_rate": 2.857142857142857e-05,
"loss": 1.1752,
"step": 39000
},
{
"epoch": 4.34065934065934,
"grad_norm": 4.632090091705322,
"learning_rate": 2.6373626373626374e-05,
"loss": 1.1964,
"step": 39500
},
{
"epoch": 4.395604395604396,
"grad_norm": 3.9423835277557373,
"learning_rate": 2.4175824175824177e-05,
"loss": 1.1861,
"step": 40000
},
{
"epoch": 4.450549450549451,
"grad_norm": 4.528479099273682,
"learning_rate": 2.1978021978021977e-05,
"loss": 1.1999,
"step": 40500
},
{
"epoch": 4.5054945054945055,
"grad_norm": 4.554440021514893,
"learning_rate": 1.978021978021978e-05,
"loss": 1.2045,
"step": 41000
},
{
"epoch": 4.56043956043956,
"grad_norm": 4.417350769042969,
"learning_rate": 1.7582417582417584e-05,
"loss": 1.2074,
"step": 41500
},
{
"epoch": 4.615384615384615,
"grad_norm": 4.048484802246094,
"learning_rate": 1.5384615384615387e-05,
"loss": 1.203,
"step": 42000
},
{
"epoch": 4.670329670329671,
"grad_norm": 4.659012317657471,
"learning_rate": 1.3186813186813187e-05,
"loss": 1.1917,
"step": 42500
},
{
"epoch": 4.725274725274725,
"grad_norm": 3.7058045864105225,
"learning_rate": 1.0989010989010989e-05,
"loss": 1.1981,
"step": 43000
},
{
"epoch": 4.78021978021978,
"grad_norm": 3.214338779449463,
"learning_rate": 8.791208791208792e-06,
"loss": 1.1868,
"step": 43500
},
{
"epoch": 4.835164835164835,
"grad_norm": 3.2473630905151367,
"learning_rate": 6.5934065934065935e-06,
"loss": 1.1979,
"step": 44000
},
{
"epoch": 4.8901098901098905,
"grad_norm": 3.6867451667785645,
"learning_rate": 4.395604395604396e-06,
"loss": 1.1873,
"step": 44500
},
{
"epoch": 4.945054945054945,
"grad_norm": 4.085856914520264,
"learning_rate": 2.197802197802198e-06,
"loss": 1.1997,
"step": 45000
},
{
"epoch": 5.0,
"grad_norm": 3.7809948921203613,
"learning_rate": 0.0,
"loss": 1.1797,
"step": 45500
}
],
"logging_steps": 500,
"max_steps": 45500,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.6063170257526784e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}