{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.001871747838131247,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.871747838131247e-05,
"grad_norm": 0.26614630222320557,
"learning_rate": 1e-05,
"loss": 10.3528,
"step": 1
},
{
"epoch": 1.871747838131247e-05,
"eval_loss": 10.350061416625977,
"eval_runtime": 298.823,
"eval_samples_per_second": 150.557,
"eval_steps_per_second": 18.821,
"step": 1
},
{
"epoch": 3.743495676262494e-05,
"grad_norm": 0.2819650173187256,
"learning_rate": 2e-05,
"loss": 10.3471,
"step": 2
},
{
"epoch": 5.615243514393741e-05,
"grad_norm": 0.2556128203868866,
"learning_rate": 3e-05,
"loss": 10.3536,
"step": 3
},
{
"epoch": 7.486991352524988e-05,
"grad_norm": 0.2791256308555603,
"learning_rate": 4e-05,
"loss": 10.3561,
"step": 4
},
{
"epoch": 9.358739190656235e-05,
"grad_norm": 0.2424553632736206,
"learning_rate": 5e-05,
"loss": 10.3419,
"step": 5
},
{
"epoch": 0.00011230487028787482,
"grad_norm": 0.2598385214805603,
"learning_rate": 6e-05,
"loss": 10.3455,
"step": 6
},
{
"epoch": 0.00013102234866918728,
"grad_norm": 0.2601359784603119,
"learning_rate": 7e-05,
"loss": 10.3473,
"step": 7
},
{
"epoch": 0.00014973982705049975,
"grad_norm": 0.2720239758491516,
"learning_rate": 8e-05,
"loss": 10.3488,
"step": 8
},
{
"epoch": 0.00016845730543181222,
"grad_norm": 0.2774849832057953,
"learning_rate": 9e-05,
"loss": 10.3473,
"step": 9
},
{
"epoch": 0.00016845730543181222,
"eval_loss": 10.347246170043945,
"eval_runtime": 299.9097,
"eval_samples_per_second": 150.012,
"eval_steps_per_second": 18.752,
"step": 9
},
{
"epoch": 0.0001871747838131247,
"grad_norm": 0.24392062425613403,
"learning_rate": 0.0001,
"loss": 10.3473,
"step": 10
},
{
"epoch": 0.00020589226219443717,
"grad_norm": 0.2927881181240082,
"learning_rate": 9.99695413509548e-05,
"loss": 10.3469,
"step": 11
},
{
"epoch": 0.00022460974057574964,
"grad_norm": 0.2824426591396332,
"learning_rate": 9.987820251299122e-05,
"loss": 10.3483,
"step": 12
},
{
"epoch": 0.0002433272189570621,
"grad_norm": 0.27498602867126465,
"learning_rate": 9.972609476841367e-05,
"loss": 10.3453,
"step": 13
},
{
"epoch": 0.00026204469733837456,
"grad_norm": 0.28570154309272766,
"learning_rate": 9.951340343707852e-05,
"loss": 10.3435,
"step": 14
},
{
"epoch": 0.00028076217571968706,
"grad_norm": 0.2508615553379059,
"learning_rate": 9.924038765061042e-05,
"loss": 10.342,
"step": 15
},
{
"epoch": 0.0002994796541009995,
"grad_norm": 0.2362649291753769,
"learning_rate": 9.890738003669029e-05,
"loss": 10.3395,
"step": 16
},
{
"epoch": 0.000318197132482312,
"grad_norm": 0.25537407398223877,
"learning_rate": 9.851478631379982e-05,
"loss": 10.3409,
"step": 17
},
{
"epoch": 0.00033691461086362445,
"grad_norm": 0.3025045096874237,
"learning_rate": 9.806308479691595e-05,
"loss": 10.3424,
"step": 18
},
{
"epoch": 0.00033691461086362445,
"eval_loss": 10.340171813964844,
"eval_runtime": 299.7168,
"eval_samples_per_second": 150.108,
"eval_steps_per_second": 18.764,
"step": 18
},
{
"epoch": 0.00035563208924493695,
"grad_norm": 0.24353958666324615,
"learning_rate": 9.755282581475769e-05,
"loss": 10.3372,
"step": 19
},
{
"epoch": 0.0003743495676262494,
"grad_norm": 0.3033573627471924,
"learning_rate": 9.698463103929542e-05,
"loss": 10.3394,
"step": 20
},
{
"epoch": 0.00039306704600756184,
"grad_norm": 0.2806469798088074,
"learning_rate": 9.635919272833938e-05,
"loss": 10.3362,
"step": 21
},
{
"epoch": 0.00041178452438887434,
"grad_norm": 0.27811411023139954,
"learning_rate": 9.567727288213005e-05,
"loss": 10.3387,
"step": 22
},
{
"epoch": 0.0004305020027701868,
"grad_norm": 0.31206846237182617,
"learning_rate": 9.493970231495835e-05,
"loss": 10.3387,
"step": 23
},
{
"epoch": 0.0004492194811514993,
"grad_norm": 0.27811336517333984,
"learning_rate": 9.414737964294636e-05,
"loss": 10.3356,
"step": 24
},
{
"epoch": 0.0004679369595328117,
"grad_norm": 0.26259589195251465,
"learning_rate": 9.330127018922194e-05,
"loss": 10.331,
"step": 25
},
{
"epoch": 0.0004866544379141242,
"grad_norm": 0.2994815707206726,
"learning_rate": 9.24024048078213e-05,
"loss": 10.3276,
"step": 26
},
{
"epoch": 0.0005053719162954367,
"grad_norm": 0.28016024827957153,
"learning_rate": 9.145187862775209e-05,
"loss": 10.3326,
"step": 27
},
{
"epoch": 0.0005053719162954367,
"eval_loss": 10.33263111114502,
"eval_runtime": 299.404,
"eval_samples_per_second": 150.265,
"eval_steps_per_second": 18.784,
"step": 27
},
{
"epoch": 0.0005240893946767491,
"grad_norm": 0.27367493510246277,
"learning_rate": 9.045084971874738e-05,
"loss": 10.3351,
"step": 28
},
{
"epoch": 0.0005428068730580617,
"grad_norm": 0.31606125831604004,
"learning_rate": 8.940053768033609e-05,
"loss": 10.3341,
"step": 29
},
{
"epoch": 0.0005615243514393741,
"grad_norm": 0.2970883846282959,
"learning_rate": 8.83022221559489e-05,
"loss": 10.3328,
"step": 30
},
{
"epoch": 0.0005802418298206866,
"grad_norm": 0.295673131942749,
"learning_rate": 8.715724127386972e-05,
"loss": 10.3295,
"step": 31
},
{
"epoch": 0.000598959308201999,
"grad_norm": 0.2940577566623688,
"learning_rate": 8.596699001693255e-05,
"loss": 10.3302,
"step": 32
},
{
"epoch": 0.0006176767865833115,
"grad_norm": 0.33430805802345276,
"learning_rate": 8.473291852294987e-05,
"loss": 10.3315,
"step": 33
},
{
"epoch": 0.000636394264964624,
"grad_norm": 0.2802128791809082,
"learning_rate": 8.345653031794292e-05,
"loss": 10.3286,
"step": 34
},
{
"epoch": 0.0006551117433459365,
"grad_norm": 0.28186723589897156,
"learning_rate": 8.213938048432697e-05,
"loss": 10.3258,
"step": 35
},
{
"epoch": 0.0006738292217272489,
"grad_norm": 0.28038591146469116,
"learning_rate": 8.07830737662829e-05,
"loss": 10.3244,
"step": 36
},
{
"epoch": 0.0006738292217272489,
"eval_loss": 10.324920654296875,
"eval_runtime": 299.8741,
"eval_samples_per_second": 150.03,
"eval_steps_per_second": 18.755,
"step": 36
},
{
"epoch": 0.0006925467001085613,
"grad_norm": 0.3061693608760834,
"learning_rate": 7.938926261462366e-05,
"loss": 10.3235,
"step": 37
},
{
"epoch": 0.0007112641784898739,
"grad_norm": 0.30565592646598816,
"learning_rate": 7.795964517353735e-05,
"loss": 10.3245,
"step": 38
},
{
"epoch": 0.0007299816568711863,
"grad_norm": 0.33372026681900024,
"learning_rate": 7.649596321166024e-05,
"loss": 10.3228,
"step": 39
},
{
"epoch": 0.0007486991352524988,
"grad_norm": 0.3121172785758972,
"learning_rate": 7.500000000000001e-05,
"loss": 10.3215,
"step": 40
},
{
"epoch": 0.0007674166136338112,
"grad_norm": 0.2669790983200073,
"learning_rate": 7.347357813929454e-05,
"loss": 10.3181,
"step": 41
},
{
"epoch": 0.0007861340920151237,
"grad_norm": 0.2956819534301758,
"learning_rate": 7.191855733945387e-05,
"loss": 10.3195,
"step": 42
},
{
"epoch": 0.0008048515703964362,
"grad_norm": 0.30040571093559265,
"learning_rate": 7.033683215379002e-05,
"loss": 10.3224,
"step": 43
},
{
"epoch": 0.0008235690487777487,
"grad_norm": 0.34042197465896606,
"learning_rate": 6.873032967079561e-05,
"loss": 10.3106,
"step": 44
},
{
"epoch": 0.0008422865271590611,
"grad_norm": 0.2788679003715515,
"learning_rate": 6.710100716628344e-05,
"loss": 10.3156,
"step": 45
},
{
"epoch": 0.0008422865271590611,
"eval_loss": 10.317497253417969,
"eval_runtime": 299.8223,
"eval_samples_per_second": 150.056,
"eval_steps_per_second": 18.758,
"step": 45
},
{
"epoch": 0.0008610040055403736,
"grad_norm": 0.319991797208786,
"learning_rate": 6.545084971874738e-05,
"loss": 10.3134,
"step": 46
},
{
"epoch": 0.0008797214839216861,
"grad_norm": 0.33527472615242004,
"learning_rate": 6.378186779084995e-05,
"loss": 10.319,
"step": 47
},
{
"epoch": 0.0008984389623029986,
"grad_norm": 0.30577778816223145,
"learning_rate": 6.209609477998338e-05,
"loss": 10.3108,
"step": 48
},
{
"epoch": 0.000917156440684311,
"grad_norm": 0.3237847685813904,
"learning_rate": 6.0395584540887963e-05,
"loss": 10.3178,
"step": 49
},
{
"epoch": 0.0009358739190656235,
"grad_norm": 0.3441982567310333,
"learning_rate": 5.868240888334653e-05,
"loss": 10.3177,
"step": 50
},
{
"epoch": 0.0009545913974469359,
"grad_norm": 0.3328624665737152,
"learning_rate": 5.695865504800327e-05,
"loss": 10.3189,
"step": 51
},
{
"epoch": 0.0009733088758282485,
"grad_norm": 0.28461754322052,
"learning_rate": 5.522642316338268e-05,
"loss": 10.3073,
"step": 52
},
{
"epoch": 0.0009920263542095608,
"grad_norm": 0.36582863330841064,
"learning_rate": 5.348782368720626e-05,
"loss": 10.3071,
"step": 53
},
{
"epoch": 0.0010107438325908733,
"grad_norm": 0.3322458267211914,
"learning_rate": 5.174497483512506e-05,
"loss": 10.3108,
"step": 54
},
{
"epoch": 0.0010107438325908733,
"eval_loss": 10.310861587524414,
"eval_runtime": 299.3956,
"eval_samples_per_second": 150.269,
"eval_steps_per_second": 18.785,
"step": 54
},
{
"epoch": 0.001029461310972186,
"grad_norm": 0.322273850440979,
"learning_rate": 5e-05,
"loss": 10.3092,
"step": 55
},
{
"epoch": 0.0010481787893534982,
"grad_norm": 0.3530115783214569,
"learning_rate": 4.825502516487497e-05,
"loss": 10.3106,
"step": 56
},
{
"epoch": 0.0010668962677348108,
"grad_norm": 0.34059804677963257,
"learning_rate": 4.6512176312793736e-05,
"loss": 10.3062,
"step": 57
},
{
"epoch": 0.0010856137461161233,
"grad_norm": 0.31238624453544617,
"learning_rate": 4.477357683661734e-05,
"loss": 10.3152,
"step": 58
},
{
"epoch": 0.0011043312244974357,
"grad_norm": 0.3038608729839325,
"learning_rate": 4.3041344951996746e-05,
"loss": 10.3124,
"step": 59
},
{
"epoch": 0.0011230487028787482,
"grad_norm": 0.31885963678359985,
"learning_rate": 4.131759111665349e-05,
"loss": 10.3135,
"step": 60
},
{
"epoch": 0.0011417661812600606,
"grad_norm": 0.2958202660083771,
"learning_rate": 3.960441545911204e-05,
"loss": 10.3012,
"step": 61
},
{
"epoch": 0.0011604836596413731,
"grad_norm": 0.29056766629219055,
"learning_rate": 3.790390522001662e-05,
"loss": 10.3042,
"step": 62
},
{
"epoch": 0.0011792011380226857,
"grad_norm": 0.339860200881958,
"learning_rate": 3.6218132209150045e-05,
"loss": 10.3026,
"step": 63
},
{
"epoch": 0.0011792011380226857,
"eval_loss": 10.30560302734375,
"eval_runtime": 299.9054,
"eval_samples_per_second": 150.014,
"eval_steps_per_second": 18.753,
"step": 63
},
{
"epoch": 0.001197918616403998,
"grad_norm": 0.3195018768310547,
"learning_rate": 3.4549150281252636e-05,
"loss": 10.3091,
"step": 64
},
{
"epoch": 0.0012166360947853106,
"grad_norm": 0.33313804864883423,
"learning_rate": 3.289899283371657e-05,
"loss": 10.3051,
"step": 65
},
{
"epoch": 0.001235353573166623,
"grad_norm": 0.27785247564315796,
"learning_rate": 3.12696703292044e-05,
"loss": 10.2989,
"step": 66
},
{
"epoch": 0.0012540710515479355,
"grad_norm": 0.29049962759017944,
"learning_rate": 2.9663167846209998e-05,
"loss": 10.3035,
"step": 67
},
{
"epoch": 0.001272788529929248,
"grad_norm": 0.3860156536102295,
"learning_rate": 2.8081442660546125e-05,
"loss": 10.305,
"step": 68
},
{
"epoch": 0.0012915060083105603,
"grad_norm": 0.31317368149757385,
"learning_rate": 2.6526421860705473e-05,
"loss": 10.3047,
"step": 69
},
{
"epoch": 0.001310223486691873,
"grad_norm": 0.30576837062835693,
"learning_rate": 2.500000000000001e-05,
"loss": 10.3051,
"step": 70
},
{
"epoch": 0.0013289409650731852,
"grad_norm": 0.37049248814582825,
"learning_rate": 2.350403678833976e-05,
"loss": 10.3061,
"step": 71
},
{
"epoch": 0.0013476584434544978,
"grad_norm": 0.29345178604125977,
"learning_rate": 2.2040354826462668e-05,
"loss": 10.3006,
"step": 72
},
{
"epoch": 0.0013476584434544978,
"eval_loss": 10.302136421203613,
"eval_runtime": 299.7781,
"eval_samples_per_second": 150.078,
"eval_steps_per_second": 18.761,
"step": 72
},
{
"epoch": 0.0013663759218358103,
"grad_norm": 0.30382636189460754,
"learning_rate": 2.061073738537635e-05,
"loss": 10.2952,
"step": 73
},
{
"epoch": 0.0013850934002171227,
"grad_norm": 0.3255029022693634,
"learning_rate": 1.9216926233717085e-05,
"loss": 10.2985,
"step": 74
},
{
"epoch": 0.0014038108785984352,
"grad_norm": 0.28385812044143677,
"learning_rate": 1.7860619515673033e-05,
"loss": 10.2983,
"step": 75
},
{
"epoch": 0.0014225283569797478,
"grad_norm": 0.314072847366333,
"learning_rate": 1.6543469682057106e-05,
"loss": 10.304,
"step": 76
},
{
"epoch": 0.0014412458353610601,
"grad_norm": 0.3194397985935211,
"learning_rate": 1.526708147705013e-05,
"loss": 10.2998,
"step": 77
},
{
"epoch": 0.0014599633137423727,
"grad_norm": 0.3076654076576233,
"learning_rate": 1.4033009983067452e-05,
"loss": 10.3069,
"step": 78
},
{
"epoch": 0.001478680792123685,
"grad_norm": 0.29715707898139954,
"learning_rate": 1.2842758726130283e-05,
"loss": 10.2996,
"step": 79
},
{
"epoch": 0.0014973982705049976,
"grad_norm": 0.3068283200263977,
"learning_rate": 1.1697777844051105e-05,
"loss": 10.2932,
"step": 80
},
{
"epoch": 0.0015161157488863101,
"grad_norm": 0.2767265737056732,
"learning_rate": 1.0599462319663905e-05,
"loss": 10.2972,
"step": 81
},
{
"epoch": 0.0015161157488863101,
"eval_loss": 10.300215721130371,
"eval_runtime": 299.1587,
"eval_samples_per_second": 150.388,
"eval_steps_per_second": 18.799,
"step": 81
},
{
"epoch": 0.0015348332272676225,
"grad_norm": 0.2779734134674072,
"learning_rate": 9.549150281252633e-06,
"loss": 10.2977,
"step": 82
},
{
"epoch": 0.001553550705648935,
"grad_norm": 0.31111666560173035,
"learning_rate": 8.548121372247918e-06,
"loss": 10.2987,
"step": 83
},
{
"epoch": 0.0015722681840302474,
"grad_norm": 0.3041585385799408,
"learning_rate": 7.597595192178702e-06,
"loss": 10.2968,
"step": 84
},
{
"epoch": 0.00159098566241156,
"grad_norm": 0.28511694073677063,
"learning_rate": 6.698729810778065e-06,
"loss": 10.2999,
"step": 85
},
{
"epoch": 0.0016097031407928725,
"grad_norm": 0.28556522727012634,
"learning_rate": 5.852620357053651e-06,
"loss": 10.2992,
"step": 86
},
{
"epoch": 0.0016284206191741848,
"grad_norm": 0.3389633297920227,
"learning_rate": 5.060297685041659e-06,
"loss": 10.2974,
"step": 87
},
{
"epoch": 0.0016471380975554973,
"grad_norm": 0.28096240758895874,
"learning_rate": 4.322727117869951e-06,
"loss": 10.2994,
"step": 88
},
{
"epoch": 0.0016658555759368097,
"grad_norm": 0.3122832477092743,
"learning_rate": 3.6408072716606346e-06,
"loss": 10.3047,
"step": 89
},
{
"epoch": 0.0016845730543181222,
"grad_norm": 0.3065173029899597,
"learning_rate": 3.0153689607045845e-06,
"loss": 10.2971,
"step": 90
},
{
"epoch": 0.0016845730543181222,
"eval_loss": 10.29946231842041,
"eval_runtime": 299.883,
"eval_samples_per_second": 150.025,
"eval_steps_per_second": 18.754,
"step": 90
},
{
"epoch": 0.0017032905326994348,
"grad_norm": 0.3437938392162323,
"learning_rate": 2.4471741852423237e-06,
"loss": 10.2954,
"step": 91
},
{
"epoch": 0.0017220080110807471,
"grad_norm": 0.2817475199699402,
"learning_rate": 1.9369152030840556e-06,
"loss": 10.3033,
"step": 92
},
{
"epoch": 0.0017407254894620597,
"grad_norm": 0.32707810401916504,
"learning_rate": 1.4852136862001764e-06,
"loss": 10.3018,
"step": 93
},
{
"epoch": 0.0017594429678433722,
"grad_norm": 0.2986222505569458,
"learning_rate": 1.0926199633097157e-06,
"loss": 10.3012,
"step": 94
},
{
"epoch": 0.0017781604462246846,
"grad_norm": 0.2933776378631592,
"learning_rate": 7.596123493895991e-07,
"loss": 10.2931,
"step": 95
},
{
"epoch": 0.0017968779246059971,
"grad_norm": 0.28214213252067566,
"learning_rate": 4.865965629214819e-07,
"loss": 10.2952,
"step": 96
},
{
"epoch": 0.0018155954029873095,
"grad_norm": 0.3439221680164337,
"learning_rate": 2.7390523158633554e-07,
"loss": 10.2972,
"step": 97
},
{
"epoch": 0.001834312881368622,
"grad_norm": 0.3103925585746765,
"learning_rate": 1.2179748700879012e-07,
"loss": 10.3049,
"step": 98
},
{
"epoch": 0.0018530303597499346,
"grad_norm": 0.3452966809272766,
"learning_rate": 3.04586490452119e-08,
"loss": 10.2984,
"step": 99
},
{
"epoch": 0.0018530303597499346,
"eval_loss": 10.299317359924316,
"eval_runtime": 299.8349,
"eval_samples_per_second": 150.049,
"eval_steps_per_second": 18.757,
"step": 99
},
{
"epoch": 0.001871747838131247,
"grad_norm": 0.34136733412742615,
"learning_rate": 0.0,
"loss": 10.2995,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5230244659200.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
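
The "log_history" array above interleaves training records (loss, grad_norm, learning_rate) with evaluation records (eval_loss, eval_runtime, eval_samples_per_second), both keyed by step. Below is a minimal sketch of how one might pull those two series out for inspection; it assumes the file has been saved locally as trainer_state.json (the name the Hugging Face Trainer writes inside a checkpoint directory), so adjust the path to match your setup.

import json

# Load the checkpoint's trainer state (path is an assumption; point it at
# the actual location, e.g. checkpoint-100/trainer_state.json).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training records carry "loss"; evaluation records carry "eval_loss".
train_log = [r for r in state["log_history"] if "loss" in r]
eval_log = [r for r in state["log_history"] if "eval_loss" in r]

print(f"trained {state['global_step']} / {state['max_steps']} steps "
      f"(epoch {state['epoch']:.6f})")
print(f"train loss: {train_log[0]['loss']:.4f} at step {train_log[0]['step']} "
      f"-> {train_log[-1]['loss']:.4f} at step {train_log[-1]['step']}")
print(f"eval loss:  {eval_log[0]['eval_loss']:.4f} at step {eval_log[0]['step']} "
      f"-> {eval_log[-1]['eval_loss']:.4f} at step {eval_log[-1]['step']}")

For this checkpoint the script would report training loss moving from about 10.35 at step 1 to about 10.30 at step 100, and eval loss from about 10.350 to about 10.299, matching the entries logged every 9 steps above.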