{
"best_metric": 0.8681809306144714,
"best_model_checkpoint": "./lora-warewe/checkpoint-400",
"epoch": 2.9797470318926047,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 2.9999999999999997e-05,
"loss": 1.9531,
"step": 10
},
{
"epoch": 0.05,
"learning_rate": 5.9999999999999995e-05,
"loss": 1.8714,
"step": 20
},
{
"epoch": 0.07,
"learning_rate": 8.999999999999999e-05,
"loss": 1.7052,
"step": 30
},
{
"epoch": 0.1,
"learning_rate": 0.00011999999999999999,
"loss": 1.3201,
"step": 40
},
{
"epoch": 0.12,
"learning_rate": 0.00015,
"loss": 1.1551,
"step": 50
},
{
"epoch": 0.15,
"learning_rate": 0.00017999999999999998,
"loss": 1.1199,
"step": 60
},
{
"epoch": 0.17,
"learning_rate": 0.00020999999999999998,
"loss": 1.0598,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 0.00023999999999999998,
"loss": 1.0233,
"step": 80
},
{
"epoch": 0.22,
"learning_rate": 0.00027,
"loss": 1.0086,
"step": 90
},
{
"epoch": 0.25,
"learning_rate": 0.0003,
"loss": 0.999,
"step": 100
},
{
"epoch": 0.27,
"learning_rate": 0.0002972875226039783,
"loss": 0.9961,
"step": 110
},
{
"epoch": 0.3,
"learning_rate": 0.00029457504520795656,
"loss": 0.9831,
"step": 120
},
{
"epoch": 0.32,
"learning_rate": 0.0002918625678119349,
"loss": 0.9637,
"step": 130
},
{
"epoch": 0.35,
"learning_rate": 0.00028915009041591315,
"loss": 0.9546,
"step": 140
},
{
"epoch": 0.37,
"learning_rate": 0.0002864376130198915,
"loss": 0.9575,
"step": 150
},
{
"epoch": 0.4,
"learning_rate": 0.00028372513562386974,
"loss": 0.9337,
"step": 160
},
{
"epoch": 0.42,
"learning_rate": 0.00028101265822784807,
"loss": 0.942,
"step": 170
},
{
"epoch": 0.45,
"learning_rate": 0.0002783001808318264,
"loss": 0.9265,
"step": 180
},
{
"epoch": 0.47,
"learning_rate": 0.0002755877034358047,
"loss": 0.9324,
"step": 190
},
{
"epoch": 0.5,
"learning_rate": 0.000272875226039783,
"loss": 0.9288,
"step": 200
},
{
"epoch": 0.5,
"eval_loss": 0.9210162162780762,
"eval_runtime": 559.4854,
"eval_samples_per_second": 3.575,
"eval_steps_per_second": 0.447,
"step": 200
},
{
"epoch": 0.52,
"learning_rate": 0.00027016274864376125,
"loss": 0.9201,
"step": 210
},
{
"epoch": 0.55,
"learning_rate": 0.00026745027124773957,
"loss": 0.9242,
"step": 220
},
{
"epoch": 0.57,
"learning_rate": 0.0002647377938517179,
"loss": 0.9114,
"step": 230
},
{
"epoch": 0.6,
"learning_rate": 0.00026202531645569616,
"loss": 0.9223,
"step": 240
},
{
"epoch": 0.62,
"learning_rate": 0.0002593128390596745,
"loss": 0.9018,
"step": 250
},
{
"epoch": 0.65,
"learning_rate": 0.0002566003616636528,
"loss": 0.9038,
"step": 260
},
{
"epoch": 0.67,
"learning_rate": 0.0002538878842676311,
"loss": 0.9087,
"step": 270
},
{
"epoch": 0.7,
"learning_rate": 0.0002511754068716094,
"loss": 0.9141,
"step": 280
},
{
"epoch": 0.72,
"learning_rate": 0.00024846292947558766,
"loss": 0.8942,
"step": 290
},
{
"epoch": 0.74,
"learning_rate": 0.000245750452079566,
"loss": 0.8865,
"step": 300
},
{
"epoch": 0.77,
"learning_rate": 0.00024303797468354428,
"loss": 0.8863,
"step": 310
},
{
"epoch": 0.79,
"learning_rate": 0.00024032549728752258,
"loss": 0.8981,
"step": 320
},
{
"epoch": 0.82,
"learning_rate": 0.0002376130198915009,
"loss": 0.8837,
"step": 330
},
{
"epoch": 0.84,
"learning_rate": 0.0002349005424954792,
"loss": 0.8887,
"step": 340
},
{
"epoch": 0.87,
"learning_rate": 0.0002321880650994575,
"loss": 0.8781,
"step": 350
},
{
"epoch": 0.89,
"learning_rate": 0.00022947558770343578,
"loss": 0.891,
"step": 360
},
{
"epoch": 0.92,
"learning_rate": 0.00022676311030741408,
"loss": 0.8852,
"step": 370
},
{
"epoch": 0.94,
"learning_rate": 0.00022405063291139237,
"loss": 0.8736,
"step": 380
},
{
"epoch": 0.97,
"learning_rate": 0.0002213381555153707,
"loss": 0.8799,
"step": 390
},
{
"epoch": 0.99,
"learning_rate": 0.000218625678119349,
"loss": 0.8847,
"step": 400
},
{
"epoch": 0.99,
"eval_loss": 0.8681809306144714,
"eval_runtime": 405.865,
"eval_samples_per_second": 4.928,
"eval_steps_per_second": 0.616,
"step": 400
},
{
"epoch": 1.02,
"learning_rate": 0.0002159132007233273,
"loss": 0.8731,
"step": 410
},
{
"epoch": 1.04,
"learning_rate": 0.0002132007233273056,
"loss": 0.8677,
"step": 420
},
{
"epoch": 1.07,
"learning_rate": 0.00021048824593128388,
"loss": 0.8784,
"step": 430
},
{
"epoch": 1.09,
"learning_rate": 0.00020777576853526217,
"loss": 0.8688,
"step": 440
},
{
"epoch": 1.12,
"learning_rate": 0.0002050632911392405,
"loss": 0.8663,
"step": 450
},
{
"epoch": 1.14,
"learning_rate": 0.0002023508137432188,
"loss": 0.8748,
"step": 460
},
{
"epoch": 1.17,
"learning_rate": 0.00019963833634719708,
"loss": 0.8619,
"step": 470
},
{
"epoch": 1.19,
"learning_rate": 0.0001969258589511754,
"loss": 0.8544,
"step": 480
},
{
"epoch": 1.22,
"learning_rate": 0.0001942133815551537,
"loss": 0.871,
"step": 490
},
{
"epoch": 1.24,
"learning_rate": 0.000191500904159132,
"loss": 0.8748,
"step": 500
},
{
"epoch": 1.27,
"learning_rate": 0.00018933092224231464,
"loss": 0.8753,
"step": 510
},
{
"epoch": 1.29,
"learning_rate": 0.00018824593128390595,
"loss": 1.8069,
"step": 520
},
{
"epoch": 1.32,
"learning_rate": 0.00018634719710669076,
"loss": 4.3811,
"step": 530
},
{
"epoch": 1.34,
"learning_rate": 0.00018390596745027123,
"loss": 5.8063,
"step": 540
},
{
"epoch": 1.37,
"learning_rate": 0.00018119349005424955,
"loss": 5.2762,
"step": 550
},
{
"epoch": 1.39,
"learning_rate": 0.00017848101265822782,
"loss": 3.0862,
"step": 560
},
{
"epoch": 1.42,
"learning_rate": 0.00017576853526220611,
"loss": 1.7397,
"step": 570
},
{
"epoch": 1.44,
"learning_rate": 0.00017305605786618444,
"loss": 1.5362,
"step": 580
},
{
"epoch": 1.47,
"learning_rate": 0.0001706148282097649,
"loss": 3.644,
"step": 590
},
{
"epoch": 1.49,
"learning_rate": 0.0001679023508137432,
"loss": 6.1529,
"step": 600
},
{
"epoch": 1.49,
"eval_loss": 6.275171756744385,
"eval_runtime": 401.4674,
"eval_samples_per_second": 4.982,
"eval_steps_per_second": 0.623,
"step": 600
},
{
"epoch": 1.51,
"learning_rate": 0.00016573236889692584,
"loss": 4.6504,
"step": 610
},
{
"epoch": 1.54,
"learning_rate": 0.00016301989150090414,
"loss": 3.3039,
"step": 620
},
{
"epoch": 1.56,
"learning_rate": 0.00016030741410488246,
"loss": 2.7266,
"step": 630
},
{
"epoch": 1.59,
"learning_rate": 0.00015759493670886075,
"loss": 4.5256,
"step": 640
},
{
"epoch": 1.61,
"learning_rate": 0.0001551537070524412,
"loss": 3.9326,
"step": 650
},
{
"epoch": 1.64,
"learning_rate": 0.00015244122965641952,
"loss": 1.6396,
"step": 660
},
{
"epoch": 1.66,
"learning_rate": 0.00014972875226039781,
"loss": 1.1304,
"step": 670
},
{
"epoch": 1.69,
"learning_rate": 0.0001470162748643761,
"loss": 1.0282,
"step": 680
},
{
"epoch": 1.71,
"learning_rate": 0.00014430379746835443,
"loss": 0.9902,
"step": 690
},
{
"epoch": 1.74,
"learning_rate": 0.00014159132007233273,
"loss": 0.9785,
"step": 700
},
{
"epoch": 1.76,
"learning_rate": 0.00013887884267631102,
"loss": 0.9422,
"step": 710
},
{
"epoch": 1.79,
"learning_rate": 0.00013616636528028932,
"loss": 0.9466,
"step": 720
},
{
"epoch": 1.81,
"learning_rate": 0.0001334538878842676,
"loss": 0.9254,
"step": 730
},
{
"epoch": 1.84,
"learning_rate": 0.0001307414104882459,
"loss": 0.9257,
"step": 740
},
{
"epoch": 1.86,
"learning_rate": 0.00012802893309222423,
"loss": 0.9179,
"step": 750
},
{
"epoch": 1.89,
"learning_rate": 0.00012531645569620252,
"loss": 0.9247,
"step": 760
},
{
"epoch": 1.91,
"learning_rate": 0.00012260397830018082,
"loss": 0.9186,
"step": 770
},
{
"epoch": 1.94,
"learning_rate": 0.00011989150090415911,
"loss": 0.9253,
"step": 780
},
{
"epoch": 1.96,
"learning_rate": 0.00011717902350813742,
"loss": 0.9193,
"step": 790
},
{
"epoch": 1.99,
"learning_rate": 0.00011446654611211573,
"loss": 0.9141,
"step": 800
},
{
"epoch": 1.99,
"eval_loss": 0.9085212349891663,
"eval_runtime": 400.6517,
"eval_samples_per_second": 4.992,
"eval_steps_per_second": 0.624,
"step": 800
},
{
"epoch": 2.01,
"learning_rate": 0.00011175406871609403,
"loss": 0.9057,
"step": 810
},
{
"epoch": 2.04,
"learning_rate": 0.00010904159132007232,
"loss": 0.9083,
"step": 820
},
{
"epoch": 2.06,
"learning_rate": 0.00010632911392405062,
"loss": 0.9091,
"step": 830
},
{
"epoch": 2.09,
"learning_rate": 0.00010361663652802893,
"loss": 0.9186,
"step": 840
},
{
"epoch": 2.11,
"learning_rate": 0.00010090415913200722,
"loss": 0.9018,
"step": 850
},
{
"epoch": 2.14,
"learning_rate": 9.819168173598552e-05,
"loss": 0.8989,
"step": 860
},
{
"epoch": 2.16,
"learning_rate": 9.547920433996383e-05,
"loss": 0.9275,
"step": 870
},
{
"epoch": 2.19,
"learning_rate": 9.276672694394213e-05,
"loss": 0.9063,
"step": 880
},
{
"epoch": 2.21,
"learning_rate": 9.005424954792042e-05,
"loss": 0.8921,
"step": 890
},
{
"epoch": 2.23,
"learning_rate": 8.734177215189872e-05,
"loss": 0.8981,
"step": 900
},
{
"epoch": 2.26,
"learning_rate": 8.462929475587703e-05,
"loss": 0.9099,
"step": 910
},
{
"epoch": 2.28,
"learning_rate": 8.191681735985534e-05,
"loss": 0.8956,
"step": 920
},
{
"epoch": 2.31,
"learning_rate": 7.920433996383362e-05,
"loss": 0.8983,
"step": 930
},
{
"epoch": 2.33,
"learning_rate": 7.649186256781193e-05,
"loss": 0.9192,
"step": 940
},
{
"epoch": 2.36,
"learning_rate": 7.377938517179023e-05,
"loss": 0.9199,
"step": 950
},
{
"epoch": 2.38,
"learning_rate": 7.106690777576854e-05,
"loss": 0.8857,
"step": 960
},
{
"epoch": 2.41,
"learning_rate": 6.835443037974683e-05,
"loss": 0.8871,
"step": 970
},
{
"epoch": 2.43,
"learning_rate": 6.564195298372513e-05,
"loss": 0.9054,
"step": 980
},
{
"epoch": 2.46,
"learning_rate": 6.292947558770344e-05,
"loss": 0.9167,
"step": 990
},
{
"epoch": 2.48,
"learning_rate": 6.021699819168174e-05,
"loss": 0.9023,
"step": 1000
},
{
"epoch": 2.48,
"eval_loss": 0.9026673436164856,
"eval_runtime": 401.9867,
"eval_samples_per_second": 4.975,
"eval_steps_per_second": 0.622,
"step": 1000
},
{
"epoch": 2.51,
"learning_rate": 5.750452079566003e-05,
"loss": 0.9043,
"step": 1010
},
{
"epoch": 2.53,
"learning_rate": 5.4792043399638334e-05,
"loss": 0.9052,
"step": 1020
},
{
"epoch": 2.56,
"learning_rate": 5.207956600361663e-05,
"loss": 0.8941,
"step": 1030
},
{
"epoch": 2.58,
"learning_rate": 4.936708860759493e-05,
"loss": 0.9081,
"step": 1040
},
{
"epoch": 2.61,
"learning_rate": 4.6654611211573234e-05,
"loss": 0.9079,
"step": 1050
},
{
"epoch": 2.63,
"learning_rate": 4.394213381555153e-05,
"loss": 0.8925,
"step": 1060
},
{
"epoch": 2.66,
"learning_rate": 4.122965641952984e-05,
"loss": 0.9041,
"step": 1070
},
{
"epoch": 2.68,
"learning_rate": 3.851717902350813e-05,
"loss": 0.902,
"step": 1080
},
{
"epoch": 2.71,
"learning_rate": 3.5804701627486435e-05,
"loss": 0.9011,
"step": 1090
},
{
"epoch": 2.73,
"learning_rate": 3.3092224231464737e-05,
"loss": 0.8883,
"step": 1100
},
{
"epoch": 2.76,
"learning_rate": 3.0379746835443035e-05,
"loss": 0.9039,
"step": 1110
},
{
"epoch": 2.78,
"learning_rate": 2.7667269439421337e-05,
"loss": 0.9078,
"step": 1120
},
{
"epoch": 2.81,
"learning_rate": 2.4954792043399636e-05,
"loss": 0.9065,
"step": 1130
},
{
"epoch": 2.83,
"learning_rate": 2.2242314647377938e-05,
"loss": 0.9084,
"step": 1140
},
{
"epoch": 2.86,
"learning_rate": 1.952983725135624e-05,
"loss": 0.8979,
"step": 1150
},
{
"epoch": 2.88,
"learning_rate": 1.6817359855334538e-05,
"loss": 0.9087,
"step": 1160
},
{
"epoch": 2.91,
"learning_rate": 1.4104882459312838e-05,
"loss": 0.9059,
"step": 1170
},
{
"epoch": 2.93,
"learning_rate": 1.1392405063291139e-05,
"loss": 0.8969,
"step": 1180
},
{
"epoch": 2.95,
"learning_rate": 8.679927667269439e-06,
"loss": 0.9162,
"step": 1190
},
{
"epoch": 2.98,
"learning_rate": 5.967450271247739e-06,
"loss": 0.9065,
"step": 1200
},
{
"epoch": 2.98,
"eval_loss": 0.9031401872634888,
"eval_runtime": 405.8678,
"eval_samples_per_second": 4.928,
"eval_steps_per_second": 0.616,
"step": 1200
}
],
"max_steps": 1206,
"num_train_epochs": 3,
"total_flos": 1.5073290925375488e+18,
"trial_name": null,
"trial_params": null
}