{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.146509341199607,
"eval_steps": 100,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03933136676499508,
"grad_norm": 0.481609046459198,
"learning_rate": 0.00015,
"loss": 2.0722,
"step": 5
},
{
"epoch": 0.07866273352999016,
"grad_norm": 0.15720224380493164,
"learning_rate": 0.0003,
"loss": 1.4825,
"step": 10
},
{
"epoch": 0.11799410029498525,
"grad_norm": 0.06716315448284149,
"learning_rate": 0.00029759999999999997,
"loss": 1.3333,
"step": 15
},
{
"epoch": 0.15732546705998032,
"grad_norm": 0.06133478134870529,
"learning_rate": 0.00029519999999999997,
"loss": 1.2341,
"step": 20
},
{
"epoch": 0.19665683382497542,
"grad_norm": 0.07264667749404907,
"learning_rate": 0.00029279999999999996,
"loss": 1.1756,
"step": 25
},
{
"epoch": 0.2359882005899705,
"grad_norm": 0.07928217202425003,
"learning_rate": 0.00029039999999999996,
"loss": 1.1197,
"step": 30
},
{
"epoch": 0.2753195673549656,
"grad_norm": 0.09420346468687057,
"learning_rate": 0.00028799999999999995,
"loss": 1.0834,
"step": 35
},
{
"epoch": 0.31465093411996065,
"grad_norm": 0.0862259566783905,
"learning_rate": 0.00028559999999999995,
"loss": 1.044,
"step": 40
},
{
"epoch": 0.35398230088495575,
"grad_norm": 0.09086894243955612,
"learning_rate": 0.00028319999999999994,
"loss": 1.0205,
"step": 45
},
{
"epoch": 0.39331366764995085,
"grad_norm": 0.08469890058040619,
"learning_rate": 0.0002808,
"loss": 0.9798,
"step": 50
},
{
"epoch": 0.4326450344149459,
"grad_norm": 0.10012397915124893,
"learning_rate": 0.0002784,
"loss": 0.9811,
"step": 55
},
{
"epoch": 0.471976401179941,
"grad_norm": 0.08633492887020111,
"learning_rate": 0.000276,
"loss": 0.9556,
"step": 60
},
{
"epoch": 0.511307767944936,
"grad_norm": 0.09879346191883087,
"learning_rate": 0.0002736,
"loss": 0.9446,
"step": 65
},
{
"epoch": 0.5506391347099312,
"grad_norm": 0.08795857429504395,
"learning_rate": 0.0002712,
"loss": 0.9228,
"step": 70
},
{
"epoch": 0.5899705014749262,
"grad_norm": 0.0837111845612526,
"learning_rate": 0.0002688,
"loss": 0.9279,
"step": 75
},
{
"epoch": 0.6293018682399213,
"grad_norm": 0.08551318198442459,
"learning_rate": 0.00026639999999999997,
"loss": 0.9267,
"step": 80
},
{
"epoch": 0.6686332350049164,
"grad_norm": 0.08481767773628235,
"learning_rate": 0.00026399999999999997,
"loss": 0.9082,
"step": 85
},
{
"epoch": 0.7079646017699115,
"grad_norm": 0.100365050137043,
"learning_rate": 0.00026159999999999996,
"loss": 0.9028,
"step": 90
},
{
"epoch": 0.7472959685349065,
"grad_norm": 0.08463772386312485,
"learning_rate": 0.00025919999999999996,
"loss": 0.8866,
"step": 95
},
{
"epoch": 0.7866273352999017,
"grad_norm": 0.09628409892320633,
"learning_rate": 0.00025679999999999995,
"loss": 0.8787,
"step": 100
},
{
"epoch": 0.7866273352999017,
"eval_loss": 0.8853636980056763,
"eval_runtime": 24.3719,
"eval_samples_per_second": 6.729,
"eval_steps_per_second": 0.862,
"step": 100
},
{
"epoch": 0.8259587020648967,
"grad_norm": 0.08835043758153915,
"learning_rate": 0.00025439999999999995,
"loss": 0.8786,
"step": 105
},
{
"epoch": 0.8652900688298918,
"grad_norm": 0.09190791845321655,
"learning_rate": 0.00025199999999999995,
"loss": 0.8693,
"step": 110
},
{
"epoch": 0.904621435594887,
"grad_norm": 0.08965795487165451,
"learning_rate": 0.00024959999999999994,
"loss": 0.8772,
"step": 115
},
{
"epoch": 0.943952802359882,
"grad_norm": 0.09055910259485245,
"learning_rate": 0.0002472,
"loss": 0.867,
"step": 120
},
{
"epoch": 0.983284169124877,
"grad_norm": 0.09172637015581131,
"learning_rate": 0.0002448,
"loss": 0.8536,
"step": 125
},
{
"epoch": 1.022615535889872,
"grad_norm": 0.10374542325735092,
"learning_rate": 0.00024239999999999998,
"loss": 0.9888,
"step": 130
},
{
"epoch": 1.0619469026548674,
"grad_norm": 0.08842068910598755,
"learning_rate": 0.00023999999999999998,
"loss": 0.8443,
"step": 135
},
{
"epoch": 1.1012782694198624,
"grad_norm": 0.0736837387084961,
"learning_rate": 0.0002376,
"loss": 0.8457,
"step": 140
},
{
"epoch": 1.1406096361848574,
"grad_norm": 0.07575016468763351,
"learning_rate": 0.0002352,
"loss": 0.8335,
"step": 145
},
{
"epoch": 1.1799410029498525,
"grad_norm": 0.07092955708503723,
"learning_rate": 0.0002328,
"loss": 0.8246,
"step": 150
},
{
"epoch": 1.2192723697148475,
"grad_norm": 0.077423095703125,
"learning_rate": 0.0002304,
"loss": 0.823,
"step": 155
},
{
"epoch": 1.2586037364798428,
"grad_norm": 0.07389391213655472,
"learning_rate": 0.00022799999999999999,
"loss": 0.819,
"step": 160
},
{
"epoch": 1.2979351032448379,
"grad_norm": 0.08229434490203857,
"learning_rate": 0.00022559999999999998,
"loss": 0.8181,
"step": 165
},
{
"epoch": 1.337266470009833,
"grad_norm": 0.07665972411632538,
"learning_rate": 0.00022319999999999998,
"loss": 0.8118,
"step": 170
},
{
"epoch": 1.376597836774828,
"grad_norm": 0.09001573175191879,
"learning_rate": 0.00022079999999999997,
"loss": 0.8157,
"step": 175
},
{
"epoch": 1.415929203539823,
"grad_norm": 0.07965826243162155,
"learning_rate": 0.00021839999999999997,
"loss": 0.8111,
"step": 180
},
{
"epoch": 1.455260570304818,
"grad_norm": 0.08642959594726562,
"learning_rate": 0.00021599999999999996,
"loss": 0.8003,
"step": 185
},
{
"epoch": 1.494591937069813,
"grad_norm": 0.0749087929725647,
"learning_rate": 0.00021359999999999996,
"loss": 0.7975,
"step": 190
},
{
"epoch": 1.5339233038348081,
"grad_norm": 0.08575734496116638,
"learning_rate": 0.00021119999999999996,
"loss": 0.7888,
"step": 195
},
{
"epoch": 1.5732546705998034,
"grad_norm": 0.0887129157781601,
"learning_rate": 0.00020879999999999998,
"loss": 0.7857,
"step": 200
},
{
"epoch": 1.5732546705998034,
"eval_loss": 0.8026237487792969,
"eval_runtime": 24.2397,
"eval_samples_per_second": 6.766,
"eval_steps_per_second": 0.866,
"step": 200
},
{
"epoch": 1.6125860373647984,
"grad_norm": 0.0926935002207756,
"learning_rate": 0.00020639999999999998,
"loss": 0.7877,
"step": 205
},
{
"epoch": 1.6519174041297935,
"grad_norm": 0.08537031710147858,
"learning_rate": 0.000204,
"loss": 0.7767,
"step": 210
},
{
"epoch": 1.6912487708947888,
"grad_norm": 0.0766814798116684,
"learning_rate": 0.0002016,
"loss": 0.785,
"step": 215
},
{
"epoch": 1.7305801376597838,
"grad_norm": 0.08394207805395126,
"learning_rate": 0.0001992,
"loss": 0.7832,
"step": 220
},
{
"epoch": 1.7699115044247788,
"grad_norm": 0.0813060775399208,
"learning_rate": 0.00019679999999999999,
"loss": 0.7766,
"step": 225
},
{
"epoch": 1.809242871189774,
"grad_norm": 0.08242856711149216,
"learning_rate": 0.00019439999999999998,
"loss": 0.7775,
"step": 230
},
{
"epoch": 1.848574237954769,
"grad_norm": 0.07610878348350525,
"learning_rate": 0.00019199999999999998,
"loss": 0.7736,
"step": 235
},
{
"epoch": 1.887905604719764,
"grad_norm": 0.08326178044080734,
"learning_rate": 0.00018959999999999997,
"loss": 0.7753,
"step": 240
},
{
"epoch": 1.927236971484759,
"grad_norm": 0.09425383061170578,
"learning_rate": 0.0001872,
"loss": 0.7577,
"step": 245
},
{
"epoch": 1.966568338249754,
"grad_norm": 0.08694498240947723,
"learning_rate": 0.0001848,
"loss": 0.7606,
"step": 250
},
{
"epoch": 2.005899705014749,
"grad_norm": 0.22805309295654297,
"learning_rate": 0.0001824,
"loss": 0.8871,
"step": 255
},
{
"epoch": 2.045231071779744,
"grad_norm": 0.09610473364591599,
"learning_rate": 0.00017999999999999998,
"loss": 0.7315,
"step": 260
},
{
"epoch": 2.084562438544739,
"grad_norm": 0.09666857868432999,
"learning_rate": 0.00017759999999999998,
"loss": 0.7315,
"step": 265
},
{
"epoch": 2.1238938053097347,
"grad_norm": 0.09328849613666534,
"learning_rate": 0.00017519999999999998,
"loss": 0.7344,
"step": 270
},
{
"epoch": 2.1632251720747298,
"grad_norm": 0.08137473464012146,
"learning_rate": 0.00017279999999999997,
"loss": 0.7347,
"step": 275
},
{
"epoch": 2.202556538839725,
"grad_norm": 0.08166103810071945,
"learning_rate": 0.00017039999999999997,
"loss": 0.7281,
"step": 280
},
{
"epoch": 2.24188790560472,
"grad_norm": 0.08074019104242325,
"learning_rate": 0.000168,
"loss": 0.7345,
"step": 285
},
{
"epoch": 2.281219272369715,
"grad_norm": 0.08479057997465134,
"learning_rate": 0.0001656,
"loss": 0.726,
"step": 290
},
{
"epoch": 2.32055063913471,
"grad_norm": 0.08091601729393005,
"learning_rate": 0.0001632,
"loss": 0.7184,
"step": 295
},
{
"epoch": 2.359882005899705,
"grad_norm": 0.08470489084720612,
"learning_rate": 0.0001608,
"loss": 0.7233,
"step": 300
},
{
"epoch": 2.359882005899705,
"eval_loss": 0.7612683176994324,
"eval_runtime": 24.27,
"eval_samples_per_second": 6.757,
"eval_steps_per_second": 0.865,
"step": 300
},
{
"epoch": 2.3992133726647,
"grad_norm": 0.08677177131175995,
"learning_rate": 0.0001584,
"loss": 0.721,
"step": 305
},
{
"epoch": 2.438544739429695,
"grad_norm": 0.08474377542734146,
"learning_rate": 0.000156,
"loss": 0.7141,
"step": 310
},
{
"epoch": 2.47787610619469,
"grad_norm": 0.08565227687358856,
"learning_rate": 0.0001536,
"loss": 0.7173,
"step": 315
},
{
"epoch": 2.5172074729596856,
"grad_norm": 0.08714301139116287,
"learning_rate": 0.0001512,
"loss": 0.7274,
"step": 320
},
{
"epoch": 2.5565388397246807,
"grad_norm": 0.0934271439909935,
"learning_rate": 0.00014879999999999998,
"loss": 0.7263,
"step": 325
},
{
"epoch": 2.5958702064896757,
"grad_norm": 0.08581375330686569,
"learning_rate": 0.00014639999999999998,
"loss": 0.7248,
"step": 330
},
{
"epoch": 2.6352015732546707,
"grad_norm": 0.08378680050373077,
"learning_rate": 0.00014399999999999998,
"loss": 0.721,
"step": 335
},
{
"epoch": 2.674532940019666,
"grad_norm": 0.08449660986661911,
"learning_rate": 0.00014159999999999997,
"loss": 0.7156,
"step": 340
},
{
"epoch": 2.713864306784661,
"grad_norm": 0.08646751940250397,
"learning_rate": 0.0001392,
"loss": 0.7094,
"step": 345
},
{
"epoch": 2.753195673549656,
"grad_norm": 0.08911272883415222,
"learning_rate": 0.0001368,
"loss": 0.709,
"step": 350
},
{
"epoch": 2.792527040314651,
"grad_norm": 0.0970829427242279,
"learning_rate": 0.0001344,
"loss": 0.7107,
"step": 355
},
{
"epoch": 2.831858407079646,
"grad_norm": 0.0854572132229805,
"learning_rate": 0.00013199999999999998,
"loss": 0.7148,
"step": 360
},
{
"epoch": 2.871189773844641,
"grad_norm": 0.08210612088441849,
"learning_rate": 0.00012959999999999998,
"loss": 0.7132,
"step": 365
},
{
"epoch": 2.910521140609636,
"grad_norm": 0.0925467386841774,
"learning_rate": 0.00012719999999999997,
"loss": 0.7201,
"step": 370
},
{
"epoch": 2.949852507374631,
"grad_norm": 0.09149914979934692,
"learning_rate": 0.00012479999999999997,
"loss": 0.7086,
"step": 375
},
{
"epoch": 2.989183874139626,
"grad_norm": 0.0827464610338211,
"learning_rate": 0.0001224,
"loss": 0.7102,
"step": 380
},
{
"epoch": 3.0285152409046217,
"grad_norm": 0.09861475974321365,
"learning_rate": 0.00011999999999999999,
"loss": 0.8086,
"step": 385
},
{
"epoch": 3.0678466076696167,
"grad_norm": 0.09810496121644974,
"learning_rate": 0.0001176,
"loss": 0.6784,
"step": 390
},
{
"epoch": 3.1071779744346117,
"grad_norm": 0.08657824248075485,
"learning_rate": 0.0001152,
"loss": 0.6818,
"step": 395
},
{
"epoch": 3.146509341199607,
"grad_norm": 0.08861815184354782,
"learning_rate": 0.00011279999999999999,
"loss": 0.6755,
"step": 400
},
{
"epoch": 3.146509341199607,
"eval_loss": 0.7408613562583923,
"eval_runtime": 24.2895,
"eval_samples_per_second": 6.752,
"eval_steps_per_second": 0.865,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 635,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.0292300729210634e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}