{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 11.389830508474576,
"eval_steps": 500,
"global_step": 168,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06779661016949153,
"grad_norm": 3.306018590927124,
"learning_rate": 9.999125804563732e-05,
"loss": 2.1358,
"num_input_tokens_seen": 10896,
"step": 1
},
{
"epoch": 0.13559322033898305,
"grad_norm": 2.0162065029144287,
"learning_rate": 9.996503523941994e-05,
"loss": 2.3575,
"num_input_tokens_seen": 20512,
"step": 2
},
{
"epoch": 0.2033898305084746,
"grad_norm": 2.1740899085998535,
"learning_rate": 9.992134075089084e-05,
"loss": 1.4712,
"num_input_tokens_seen": 29984,
"step": 3
},
{
"epoch": 0.2711864406779661,
"grad_norm": 2.0339269638061523,
"learning_rate": 9.986018985905901e-05,
"loss": 2.3124,
"num_input_tokens_seen": 39760,
"step": 4
},
{
"epoch": 0.3389830508474576,
"grad_norm": 1.0339545011520386,
"learning_rate": 9.978160394705668e-05,
"loss": 1.8062,
"num_input_tokens_seen": 49856,
"step": 5
},
{
"epoch": 0.4067796610169492,
"grad_norm": 0.6279817819595337,
"learning_rate": 9.968561049466214e-05,
"loss": 2.0499,
"num_input_tokens_seen": 60208,
"step": 6
},
{
"epoch": 0.4745762711864407,
"grad_norm": 0.5110039114952087,
"learning_rate": 9.957224306869053e-05,
"loss": 1.1603,
"num_input_tokens_seen": 70816,
"step": 7
},
{
"epoch": 0.5423728813559322,
"grad_norm": 0.8199847936630249,
"learning_rate": 9.944154131125642e-05,
"loss": 1.9923,
"num_input_tokens_seen": 79968,
"step": 8
},
{
"epoch": 0.6101694915254238,
"grad_norm": 0.729006290435791,
"learning_rate": 9.92935509259118e-05,
"loss": 1.9688,
"num_input_tokens_seen": 89472,
"step": 9
},
{
"epoch": 0.6779661016949152,
"grad_norm": 1.2717365026474,
"learning_rate": 9.912832366166442e-05,
"loss": 2.2878,
"num_input_tokens_seen": 100336,
"step": 10
},
{
"epoch": 0.7457627118644068,
"grad_norm": 0.6771590113639832,
"learning_rate": 9.894591729488242e-05,
"loss": 2.001,
"num_input_tokens_seen": 110640,
"step": 11
},
{
"epoch": 0.8135593220338984,
"grad_norm": 0.4566394090652466,
"learning_rate": 9.874639560909117e-05,
"loss": 1.8458,
"num_input_tokens_seen": 120304,
"step": 12
},
{
"epoch": 0.8813559322033898,
"grad_norm": 0.6037254929542542,
"learning_rate": 9.852982837266955e-05,
"loss": 1.8775,
"num_input_tokens_seen": 128288,
"step": 13
},
{
"epoch": 0.9491525423728814,
"grad_norm": 0.6338982582092285,
"learning_rate": 9.829629131445342e-05,
"loss": 1.9309,
"num_input_tokens_seen": 138624,
"step": 14
},
{
"epoch": 1.0169491525423728,
"grad_norm": 0.5118916630744934,
"learning_rate": 9.804586609725499e-05,
"loss": 1.5507,
"num_input_tokens_seen": 148208,
"step": 15
},
{
"epoch": 1.0847457627118644,
"grad_norm": 0.4119274914264679,
"learning_rate": 9.777864028930705e-05,
"loss": 1.6802,
"num_input_tokens_seen": 159200,
"step": 16
},
{
"epoch": 1.152542372881356,
"grad_norm": 0.5065487623214722,
"learning_rate": 9.74947073336423e-05,
"loss": 1.5648,
"num_input_tokens_seen": 166544,
"step": 17
},
{
"epoch": 1.2203389830508475,
"grad_norm": 0.47032174468040466,
"learning_rate": 9.719416651541839e-05,
"loss": 1.3598,
"num_input_tokens_seen": 177120,
"step": 18
},
{
"epoch": 1.288135593220339,
"grad_norm": 0.38840025663375854,
"learning_rate": 9.687712292719997e-05,
"loss": 1.3238,
"num_input_tokens_seen": 186272,
"step": 19
},
{
"epoch": 1.3559322033898304,
"grad_norm": 0.5947024822235107,
"learning_rate": 9.654368743221022e-05,
"loss": 1.8704,
"num_input_tokens_seen": 196480,
"step": 20
},
{
"epoch": 1.423728813559322,
"grad_norm": 0.5424012541770935,
"learning_rate": 9.619397662556435e-05,
"loss": 1.8205,
"num_input_tokens_seen": 203856,
"step": 21
},
{
"epoch": 1.4915254237288136,
"grad_norm": 0.45230892300605774,
"learning_rate": 9.582811279349882e-05,
"loss": 1.3595,
"num_input_tokens_seen": 214512,
"step": 22
},
{
"epoch": 1.559322033898305,
"grad_norm": 0.41904568672180176,
"learning_rate": 9.544622387061055e-05,
"loss": 2.0797,
"num_input_tokens_seen": 223792,
"step": 23
},
{
"epoch": 1.6271186440677967,
"grad_norm": 0.5474861264228821,
"learning_rate": 9.504844339512095e-05,
"loss": 2.15,
"num_input_tokens_seen": 233776,
"step": 24
},
{
"epoch": 1.694915254237288,
"grad_norm": 0.4554285407066345,
"learning_rate": 9.463491046218058e-05,
"loss": 2.0059,
"num_input_tokens_seen": 243600,
"step": 25
},
{
"epoch": 1.7627118644067796,
"grad_norm": 0.4554867744445801,
"learning_rate": 9.420576967523049e-05,
"loss": 1.2697,
"num_input_tokens_seen": 254608,
"step": 26
},
{
"epoch": 1.8305084745762712,
"grad_norm": 0.44335854053497314,
"learning_rate": 9.376117109543769e-05,
"loss": 1.6259,
"num_input_tokens_seen": 265728,
"step": 27
},
{
"epoch": 1.8983050847457628,
"grad_norm": 0.36280569434165955,
"learning_rate": 9.330127018922194e-05,
"loss": 1.0618,
"num_input_tokens_seen": 276624,
"step": 28
},
{
"epoch": 1.9661016949152543,
"grad_norm": 0.4373883306980133,
"learning_rate": 9.282622777389258e-05,
"loss": 1.8557,
"num_input_tokens_seen": 287072,
"step": 29
},
{
"epoch": 2.0338983050847457,
"grad_norm": 0.5854040384292603,
"learning_rate": 9.233620996141421e-05,
"loss": 1.8077,
"num_input_tokens_seen": 296248,
"step": 30
},
{
"epoch": 2.1016949152542375,
"grad_norm": 0.5168242454528809,
"learning_rate": 9.183138810032099e-05,
"loss": 1.767,
"num_input_tokens_seen": 305560,
"step": 31
},
{
"epoch": 2.169491525423729,
"grad_norm": 0.4406554698944092,
"learning_rate": 9.131193871579975e-05,
"loss": 1.5561,
"num_input_tokens_seen": 317176,
"step": 32
},
{
"epoch": 2.23728813559322,
"grad_norm": 0.48960959911346436,
"learning_rate": 9.077804344796302e-05,
"loss": 1.7631,
"num_input_tokens_seen": 327352,
"step": 33
},
{
"epoch": 2.305084745762712,
"grad_norm": 0.4791744351387024,
"learning_rate": 9.022988898833342e-05,
"loss": 1.5754,
"num_input_tokens_seen": 337384,
"step": 34
},
{
"epoch": 2.3728813559322033,
"grad_norm": 0.42596694827079773,
"learning_rate": 8.966766701456177e-05,
"loss": 1.4501,
"num_input_tokens_seen": 346952,
"step": 35
},
{
"epoch": 2.440677966101695,
"grad_norm": 0.46600142121315,
"learning_rate": 8.90915741234015e-05,
"loss": 1.8493,
"num_input_tokens_seen": 356568,
"step": 36
},
{
"epoch": 2.5084745762711864,
"grad_norm": 0.49377578496932983,
"learning_rate": 8.850181176196315e-05,
"loss": 1.6969,
"num_input_tokens_seen": 366504,
"step": 37
},
{
"epoch": 2.576271186440678,
"grad_norm": 0.42628544569015503,
"learning_rate": 8.789858615727265e-05,
"loss": 1.3998,
"num_input_tokens_seen": 377512,
"step": 38
},
{
"epoch": 2.6440677966101696,
"grad_norm": 0.37030258774757385,
"learning_rate": 8.728210824415827e-05,
"loss": 1.1354,
"num_input_tokens_seen": 388728,
"step": 39
},
{
"epoch": 2.711864406779661,
"grad_norm": 0.763684093952179,
"learning_rate": 8.665259359149132e-05,
"loss": 1.5526,
"num_input_tokens_seen": 393800,
"step": 40
},
{
"epoch": 2.7796610169491527,
"grad_norm": 0.5274456143379211,
"learning_rate": 8.601026232680634e-05,
"loss": 1.671,
"num_input_tokens_seen": 404072,
"step": 41
},
{
"epoch": 2.847457627118644,
"grad_norm": 0.4358402192592621,
"learning_rate": 8.535533905932738e-05,
"loss": 1.1851,
"num_input_tokens_seen": 415336,
"step": 42
},
{
"epoch": 2.915254237288136,
"grad_norm": 0.6406439542770386,
"learning_rate": 8.468805280142709e-05,
"loss": 1.8113,
"num_input_tokens_seen": 425288,
"step": 43
},
{
"epoch": 2.983050847457627,
"grad_norm": 0.48384496569633484,
"learning_rate": 8.400863688854597e-05,
"loss": 1.2456,
"num_input_tokens_seen": 436584,
"step": 44
},
{
"epoch": 3.0508474576271185,
"grad_norm": 0.6171286702156067,
"learning_rate": 8.33173288976002e-05,
"loss": 1.479,
"num_input_tokens_seen": 445072,
"step": 45
},
{
"epoch": 3.1186440677966103,
"grad_norm": 0.5578075051307678,
"learning_rate": 8.261437056390606e-05,
"loss": 1.8757,
"num_input_tokens_seen": 453696,
"step": 46
},
{
"epoch": 3.1864406779661016,
"grad_norm": 0.5808205604553223,
"learning_rate": 8.190000769665044e-05,
"loss": 1.4613,
"num_input_tokens_seen": 462976,
"step": 47
},
{
"epoch": 3.2542372881355934,
"grad_norm": 0.46899017691612244,
"learning_rate": 8.117449009293668e-05,
"loss": 1.2657,
"num_input_tokens_seen": 474016,
"step": 48
},
{
"epoch": 3.3220338983050848,
"grad_norm": 0.677277147769928,
"learning_rate": 8.043807145043604e-05,
"loss": 1.7769,
"num_input_tokens_seen": 482960,
"step": 49
},
{
"epoch": 3.389830508474576,
"grad_norm": 0.543119490146637,
"learning_rate": 7.969100927867507e-05,
"loss": 1.3444,
"num_input_tokens_seen": 493200,
"step": 50
},
{
"epoch": 3.457627118644068,
"grad_norm": 0.5538585782051086,
"learning_rate": 7.89335648089903e-05,
"loss": 1.4113,
"num_input_tokens_seen": 503168,
"step": 51
},
{
"epoch": 3.5254237288135593,
"grad_norm": 0.6398975253105164,
"learning_rate": 7.81660029031811e-05,
"loss": 1.5596,
"num_input_tokens_seen": 513776,
"step": 52
},
{
"epoch": 3.593220338983051,
"grad_norm": 0.8313115835189819,
"learning_rate": 7.738859196089358e-05,
"loss": 1.7417,
"num_input_tokens_seen": 521376,
"step": 53
},
{
"epoch": 3.6610169491525424,
"grad_norm": 0.777786910533905,
"learning_rate": 7.660160382576683e-05,
"loss": 1.7439,
"num_input_tokens_seen": 531504,
"step": 54
},
{
"epoch": 3.7288135593220337,
"grad_norm": 0.7124835848808289,
"learning_rate": 7.580531369037533e-05,
"loss": 1.2392,
"num_input_tokens_seen": 542832,
"step": 55
},
{
"epoch": 3.7966101694915255,
"grad_norm": 0.7158543467521667,
"learning_rate": 7.500000000000001e-05,
"loss": 1.4463,
"num_input_tokens_seen": 554128,
"step": 56
},
{
"epoch": 3.864406779661017,
"grad_norm": 0.6232526898384094,
"learning_rate": 7.4185944355262e-05,
"loss": 1.1846,
"num_input_tokens_seen": 564576,
"step": 57
},
{
"epoch": 3.9322033898305087,
"grad_norm": 0.6178350448608398,
"learning_rate": 7.33634314136531e-05,
"loss": 1.1875,
"num_input_tokens_seen": 574464,
"step": 58
},
{
"epoch": 4.0,
"grad_norm": 0.720112144947052,
"learning_rate": 7.253274878999727e-05,
"loss": 1.1346,
"num_input_tokens_seen": 584192,
"step": 59
},
{
"epoch": 4.067796610169491,
"grad_norm": 0.8956817388534546,
"learning_rate": 7.169418695587791e-05,
"loss": 1.6462,
"num_input_tokens_seen": 593440,
"step": 60
},
{
"epoch": 4.135593220338983,
"grad_norm": 0.9503009915351868,
"learning_rate": 7.084803913806641e-05,
"loss": 1.0892,
"num_input_tokens_seen": 603168,
"step": 61
},
{
"epoch": 4.203389830508475,
"grad_norm": 0.843825101852417,
"learning_rate": 6.999460121598704e-05,
"loss": 1.4054,
"num_input_tokens_seen": 611888,
"step": 62
},
{
"epoch": 4.271186440677966,
"grad_norm": 0.7209855318069458,
"learning_rate": 6.91341716182545e-05,
"loss": 0.9302,
"num_input_tokens_seen": 623312,
"step": 63
},
{
"epoch": 4.338983050847458,
"grad_norm": 0.884834885597229,
"learning_rate": 6.826705121831976e-05,
"loss": 1.2218,
"num_input_tokens_seen": 634768,
"step": 64
},
{
"epoch": 4.406779661016949,
"grad_norm": 1.0309629440307617,
"learning_rate": 6.739354322926136e-05,
"loss": 1.1605,
"num_input_tokens_seen": 644848,
"step": 65
},
{
"epoch": 4.47457627118644,
"grad_norm": 1.097214698791504,
"learning_rate": 6.651395309775837e-05,
"loss": 1.7099,
"num_input_tokens_seen": 654672,
"step": 66
},
{
"epoch": 4.5423728813559325,
"grad_norm": 0.7945898175239563,
"learning_rate": 6.562858839728223e-05,
"loss": 0.8147,
"num_input_tokens_seen": 665584,
"step": 67
},
{
"epoch": 4.610169491525424,
"grad_norm": 1.2936004400253296,
"learning_rate": 6.473775872054521e-05,
"loss": 1.5787,
"num_input_tokens_seen": 674096,
"step": 68
},
{
"epoch": 4.677966101694915,
"grad_norm": 1.0135834217071533,
"learning_rate": 6.384177557124247e-05,
"loss": 1.2265,
"num_input_tokens_seen": 683968,
"step": 69
},
{
"epoch": 4.745762711864407,
"grad_norm": 0.9531109929084778,
"learning_rate": 6.294095225512603e-05,
"loss": 1.2753,
"num_input_tokens_seen": 694368,
"step": 70
},
{
"epoch": 4.813559322033898,
"grad_norm": 1.0897727012634277,
"learning_rate": 6.203560377044866e-05,
"loss": 1.6676,
"num_input_tokens_seen": 703440,
"step": 71
},
{
"epoch": 4.88135593220339,
"grad_norm": 1.4037953615188599,
"learning_rate": 6.112604669781572e-05,
"loss": 1.4984,
"num_input_tokens_seen": 711264,
"step": 72
},
{
"epoch": 4.9491525423728815,
"grad_norm": 1.1747398376464844,
"learning_rate": 6.021259908948402e-05,
"loss": 1.6282,
"num_input_tokens_seen": 722624,
"step": 73
},
{
"epoch": 5.016949152542373,
"grad_norm": 0.8543111085891724,
"learning_rate": 5.9295580358145744e-05,
"loss": 0.8916,
"num_input_tokens_seen": 730976,
"step": 74
},
{
"epoch": 5.084745762711864,
"grad_norm": 0.9056837558746338,
"learning_rate": 5.837531116523682e-05,
"loss": 0.7409,
"num_input_tokens_seen": 740160,
"step": 75
},
{
"epoch": 5.1525423728813555,
"grad_norm": 1.1560508012771606,
"learning_rate": 5.745211330880872e-05,
"loss": 1.3499,
"num_input_tokens_seen": 750544,
"step": 76
},
{
"epoch": 5.220338983050848,
"grad_norm": 0.9036076664924622,
"learning_rate": 5.6526309611002594e-05,
"loss": 0.901,
"num_input_tokens_seen": 761744,
"step": 77
},
{
"epoch": 5.288135593220339,
"grad_norm": 1.3706295490264893,
"learning_rate": 5.559822380516539e-05,
"loss": 1.1879,
"num_input_tokens_seen": 772944,
"step": 78
},
{
"epoch": 5.3559322033898304,
"grad_norm": 1.5141230821609497,
"learning_rate": 5.466818042264753e-05,
"loss": 1.0959,
"num_input_tokens_seen": 781296,
"step": 79
},
{
"epoch": 5.423728813559322,
"grad_norm": 1.3105418682098389,
"learning_rate": 5.373650467932122e-05,
"loss": 1.2333,
"num_input_tokens_seen": 792416,
"step": 80
},
{
"epoch": 5.491525423728813,
"grad_norm": 1.5772976875305176,
"learning_rate": 5.2803522361859594e-05,
"loss": 1.1665,
"num_input_tokens_seen": 802688,
"step": 81
},
{
"epoch": 5.559322033898305,
"grad_norm": 1.1764063835144043,
"learning_rate": 5.18695597138163e-05,
"loss": 1.0339,
"num_input_tokens_seen": 813376,
"step": 82
},
{
"epoch": 5.627118644067797,
"grad_norm": 1.3924024105072021,
"learning_rate": 5.0934943321545115e-05,
"loss": 1.0024,
"num_input_tokens_seen": 822752,
"step": 83
},
{
"epoch": 5.694915254237288,
"grad_norm": 1.2736717462539673,
"learning_rate": 5e-05,
"loss": 0.9337,
"num_input_tokens_seen": 832720,
"step": 84
},
{
"epoch": 5.762711864406779,
"grad_norm": 1.5381358861923218,
"learning_rate": 4.9065056678454904e-05,
"loss": 1.0879,
"num_input_tokens_seen": 842880,
"step": 85
},
{
"epoch": 5.830508474576272,
"grad_norm": 1.6273722648620605,
"learning_rate": 4.813044028618373e-05,
"loss": 1.3822,
"num_input_tokens_seen": 851968,
"step": 86
},
{
"epoch": 5.898305084745763,
"grad_norm": 1.8411167860031128,
"learning_rate": 4.7196477638140404e-05,
"loss": 1.3801,
"num_input_tokens_seen": 861632,
"step": 87
},
{
"epoch": 5.966101694915254,
"grad_norm": 1.8025944232940674,
"learning_rate": 4.626349532067879e-05,
"loss": 1.3577,
"num_input_tokens_seen": 872352,
"step": 88
},
{
"epoch": 6.033898305084746,
"grad_norm": 1.44851815700531,
"learning_rate": 4.5331819577352474e-05,
"loss": 0.7063,
"num_input_tokens_seen": 881072,
"step": 89
},
{
"epoch": 6.101694915254237,
"grad_norm": 1.7997266054153442,
"learning_rate": 4.4401776194834613e-05,
"loss": 1.35,
"num_input_tokens_seen": 891840,
"step": 90
},
{
"epoch": 6.169491525423728,
"grad_norm": 2.1974358558654785,
"learning_rate": 4.347369038899744e-05,
"loss": 1.2183,
"num_input_tokens_seen": 901232,
"step": 91
},
{
"epoch": 6.237288135593221,
"grad_norm": 2.028881072998047,
"learning_rate": 4.254788669119127e-05,
"loss": 1.0497,
"num_input_tokens_seen": 910496,
"step": 92
},
{
"epoch": 6.305084745762712,
"grad_norm": 1.7718591690063477,
"learning_rate": 4.162468883476319e-05,
"loss": 0.6885,
"num_input_tokens_seen": 919856,
"step": 93
},
{
"epoch": 6.372881355932203,
"grad_norm": 2.046247720718384,
"learning_rate": 4.0704419641854274e-05,
"loss": 0.9335,
"num_input_tokens_seen": 931104,
"step": 94
},
{
"epoch": 6.440677966101695,
"grad_norm": 2.308433771133423,
"learning_rate": 3.978740091051599e-05,
"loss": 1.0183,
"num_input_tokens_seen": 940976,
"step": 95
},
{
"epoch": 6.508474576271187,
"grad_norm": 2.1246485710144043,
"learning_rate": 3.887395330218429e-05,
"loss": 1.0591,
"num_input_tokens_seen": 949248,
"step": 96
},
{
"epoch": 6.576271186440678,
"grad_norm": 2.154073715209961,
"learning_rate": 3.7964396229551364e-05,
"loss": 1.0101,
"num_input_tokens_seen": 960336,
"step": 97
},
{
"epoch": 6.6440677966101696,
"grad_norm": 1.561721920967102,
"learning_rate": 3.705904774487396e-05,
"loss": 0.6923,
"num_input_tokens_seen": 970480,
"step": 98
},
{
"epoch": 6.711864406779661,
"grad_norm": 2.500340461730957,
"learning_rate": 3.6158224428757535e-05,
"loss": 0.9599,
"num_input_tokens_seen": 979104,
"step": 99
},
{
"epoch": 6.779661016949152,
"grad_norm": 2.429661512374878,
"learning_rate": 3.5262241279454785e-05,
"loss": 1.0911,
"num_input_tokens_seen": 989312,
"step": 100
},
{
"epoch": 6.847457627118644,
"grad_norm": 1.9327709674835205,
"learning_rate": 3.4371411602717784e-05,
"loss": 0.9246,
"num_input_tokens_seen": 999312,
"step": 101
},
{
"epoch": 6.915254237288136,
"grad_norm": 2.1729629039764404,
"learning_rate": 3.3486046902241664e-05,
"loss": 0.811,
"num_input_tokens_seen": 1010096,
"step": 102
},
{
"epoch": 6.983050847457627,
"grad_norm": 2.6227834224700928,
"learning_rate": 3.2606456770738636e-05,
"loss": 0.8122,
"num_input_tokens_seen": 1018704,
"step": 103
},
{
"epoch": 7.0508474576271185,
"grad_norm": 2.424504041671753,
"learning_rate": 3.173294878168025e-05,
"loss": 0.6868,
"num_input_tokens_seen": 1027008,
"step": 104
},
{
"epoch": 7.11864406779661,
"grad_norm": 2.2759225368499756,
"learning_rate": 3.086582838174551e-05,
"loss": 0.8705,
"num_input_tokens_seen": 1038016,
"step": 105
},
{
"epoch": 7.186440677966102,
"grad_norm": 2.652327299118042,
"learning_rate": 3.000539878401296e-05,
"loss": 1.0279,
"num_input_tokens_seen": 1046912,
"step": 106
},
{
"epoch": 7.254237288135593,
"grad_norm": 2.538667678833008,
"learning_rate": 2.9151960861933614e-05,
"loss": 0.9108,
"num_input_tokens_seen": 1056480,
"step": 107
},
{
"epoch": 7.322033898305085,
"grad_norm": 3.2955853939056396,
"learning_rate": 2.8305813044122097e-05,
"loss": 0.9454,
"num_input_tokens_seen": 1066080,
"step": 108
},
{
"epoch": 7.389830508474576,
"grad_norm": 2.35931134223938,
"learning_rate": 2.746725121000273e-05,
"loss": 0.7537,
"num_input_tokens_seen": 1075376,
"step": 109
},
{
"epoch": 7.4576271186440675,
"grad_norm": 2.282921552658081,
"learning_rate": 2.66365685863469e-05,
"loss": 0.87,
"num_input_tokens_seen": 1086448,
"step": 110
},
{
"epoch": 7.52542372881356,
"grad_norm": 1.762494683265686,
"learning_rate": 2.581405564473801e-05,
"loss": 0.5695,
"num_input_tokens_seen": 1098192,
"step": 111
},
{
"epoch": 7.593220338983051,
"grad_norm": 1.9810832738876343,
"learning_rate": 2.500000000000001e-05,
"loss": 0.5695,
"num_input_tokens_seen": 1108912,
"step": 112
},
{
"epoch": 7.661016949152542,
"grad_norm": 2.754438877105713,
"learning_rate": 2.4194686309624663e-05,
"loss": 0.7164,
"num_input_tokens_seen": 1116672,
"step": 113
},
{
"epoch": 7.728813559322034,
"grad_norm": 2.1492197513580322,
"learning_rate": 2.3398396174233178e-05,
"loss": 0.8342,
"num_input_tokens_seen": 1126640,
"step": 114
},
{
"epoch": 7.796610169491525,
"grad_norm": 2.815870761871338,
"learning_rate": 2.261140803910644e-05,
"loss": 0.8934,
"num_input_tokens_seen": 1135920,
"step": 115
},
{
"epoch": 7.864406779661017,
"grad_norm": 2.1932082176208496,
"learning_rate": 2.1833997096818898e-05,
"loss": 0.8654,
"num_input_tokens_seen": 1147216,
"step": 116
},
{
"epoch": 7.932203389830509,
"grad_norm": 2.1254825592041016,
"learning_rate": 2.1066435191009715e-05,
"loss": 0.4848,
"num_input_tokens_seen": 1158288,
"step": 117
},
{
"epoch": 8.0,
"grad_norm": 3.3997814655303955,
"learning_rate": 2.0308990721324927e-05,
"loss": 0.9819,
"num_input_tokens_seen": 1167328,
"step": 118
},
{
"epoch": 8.067796610169491,
"grad_norm": 2.623171329498291,
"learning_rate": 1.9561928549563968e-05,
"loss": 0.7783,
"num_input_tokens_seen": 1177600,
"step": 119
},
{
"epoch": 8.135593220338983,
"grad_norm": 3.17315936088562,
"learning_rate": 1.8825509907063327e-05,
"loss": 0.7392,
"num_input_tokens_seen": 1186256,
"step": 120
},
{
"epoch": 8.203389830508474,
"grad_norm": 2.5170342922210693,
"learning_rate": 1.8099992303349577e-05,
"loss": 0.5762,
"num_input_tokens_seen": 1195200,
"step": 121
},
{
"epoch": 8.271186440677965,
"grad_norm": 2.4424407482147217,
"learning_rate": 1.738562943609396e-05,
"loss": 0.7936,
"num_input_tokens_seen": 1205776,
"step": 122
},
{
"epoch": 8.338983050847457,
"grad_norm": 2.6299970149993896,
"learning_rate": 1.6682671102399805e-05,
"loss": 0.794,
"num_input_tokens_seen": 1215440,
"step": 123
},
{
"epoch": 8.40677966101695,
"grad_norm": 2.1438987255096436,
"learning_rate": 1.599136311145402e-05,
"loss": 0.3746,
"num_input_tokens_seen": 1226240,
"step": 124
},
{
"epoch": 8.474576271186441,
"grad_norm": 2.2746033668518066,
"learning_rate": 1.531194719857292e-05,
"loss": 0.5889,
"num_input_tokens_seen": 1237296,
"step": 125
},
{
"epoch": 8.542372881355933,
"grad_norm": 2.953768253326416,
"learning_rate": 1.4644660940672627e-05,
"loss": 0.8482,
"num_input_tokens_seen": 1247184,
"step": 126
},
{
"epoch": 8.610169491525424,
"grad_norm": 2.8020410537719727,
"learning_rate": 1.398973767319368e-05,
"loss": 0.789,
"num_input_tokens_seen": 1257200,
"step": 127
},
{
"epoch": 8.677966101694915,
"grad_norm": 2.6043848991394043,
"learning_rate": 1.3347406408508695e-05,
"loss": 0.7434,
"num_input_tokens_seen": 1267568,
"step": 128
},
{
"epoch": 8.745762711864407,
"grad_norm": 2.287994384765625,
"learning_rate": 1.2717891755841722e-05,
"loss": 0.5503,
"num_input_tokens_seen": 1278288,
"step": 129
},
{
"epoch": 8.813559322033898,
"grad_norm": 2.889692544937134,
"learning_rate": 1.2101413842727345e-05,
"loss": 0.5,
"num_input_tokens_seen": 1288464,
"step": 130
},
{
"epoch": 8.88135593220339,
"grad_norm": 2.310126304626465,
"learning_rate": 1.1498188238036861e-05,
"loss": 0.7285,
"num_input_tokens_seen": 1298768,
"step": 131
},
{
"epoch": 8.94915254237288,
"grad_norm": 2.0122134685516357,
"learning_rate": 1.090842587659851e-05,
"loss": 0.3758,
"num_input_tokens_seen": 1308912,
"step": 132
},
{
"epoch": 9.016949152542374,
"grad_norm": 2.8011984825134277,
"learning_rate": 1.0332332985438248e-05,
"loss": 0.5785,
"num_input_tokens_seen": 1317368,
"step": 133
},
{
"epoch": 9.084745762711865,
"grad_norm": 2.287930488586426,
"learning_rate": 9.770111011666583e-06,
"loss": 0.4679,
"num_input_tokens_seen": 1328440,
"step": 134
},
{
"epoch": 9.152542372881356,
"grad_norm": 2.4521470069885254,
"learning_rate": 9.221956552036992e-06,
"loss": 0.6424,
"num_input_tokens_seen": 1337928,
"step": 135
},
{
"epoch": 9.220338983050848,
"grad_norm": 2.9559810161590576,
"learning_rate": 8.688061284200266e-06,
"loss": 0.6096,
"num_input_tokens_seen": 1346520,
"step": 136
},
{
"epoch": 9.288135593220339,
"grad_norm": 2.6230311393737793,
"learning_rate": 8.168611899679013e-06,
"loss": 0.5381,
"num_input_tokens_seen": 1357816,
"step": 137
},
{
"epoch": 9.35593220338983,
"grad_norm": 2.8988404273986816,
"learning_rate": 7.663790038585793e-06,
"loss": 0.8004,
"num_input_tokens_seen": 1368280,
"step": 138
},
{
"epoch": 9.423728813559322,
"grad_norm": 1.9839284420013428,
"learning_rate": 7.173772226107434e-06,
"loss": 0.4723,
"num_input_tokens_seen": 1378520,
"step": 139
},
{
"epoch": 9.491525423728813,
"grad_norm": 2.406280517578125,
"learning_rate": 6.698729810778065e-06,
"loss": 0.4489,
"num_input_tokens_seen": 1388440,
"step": 140
},
{
"epoch": 9.559322033898304,
"grad_norm": 3.5145606994628906,
"learning_rate": 6.238828904562316e-06,
"loss": 0.707,
"num_input_tokens_seen": 1398568,
"step": 141
},
{
"epoch": 9.627118644067796,
"grad_norm": 3.3337950706481934,
"learning_rate": 5.794230324769517e-06,
"loss": 0.6449,
"num_input_tokens_seen": 1408856,
"step": 142
},
{
"epoch": 9.694915254237289,
"grad_norm": 2.906613349914551,
"learning_rate": 5.365089537819434e-06,
"loss": 0.5634,
"num_input_tokens_seen": 1419128,
"step": 143
},
{
"epoch": 9.76271186440678,
"grad_norm": 2.9478540420532227,
"learning_rate": 4.951556604879048e-06,
"loss": 0.6285,
"num_input_tokens_seen": 1429944,
"step": 144
},
{
"epoch": 9.830508474576272,
"grad_norm": 2.812126874923706,
"learning_rate": 4.5537761293894535e-06,
"loss": 0.6617,
"num_input_tokens_seen": 1440360,
"step": 145
},
{
"epoch": 9.898305084745763,
"grad_norm": 3.44775390625,
"learning_rate": 4.1718872065011904e-06,
"loss": 0.7798,
"num_input_tokens_seen": 1450328,
"step": 146
},
{
"epoch": 9.966101694915254,
"grad_norm": 2.9694623947143555,
"learning_rate": 3.8060233744356633e-06,
"loss": 0.7539,
"num_input_tokens_seen": 1461144,
"step": 147
},
{
"epoch": 10.033898305084746,
"grad_norm": 2.622793674468994,
"learning_rate": 3.4563125677897932e-06,
"loss": 0.3182,
"num_input_tokens_seen": 1469864,
"step": 148
},
{
"epoch": 10.101694915254237,
"grad_norm": 2.6204419136047363,
"learning_rate": 3.1228770728000455e-06,
"loss": 0.6796,
"num_input_tokens_seen": 1480248,
"step": 149
},
{
"epoch": 10.169491525423728,
"grad_norm": 2.793680429458618,
"learning_rate": 2.8058334845816213e-06,
"loss": 0.4411,
"num_input_tokens_seen": 1488056,
"step": 150
},
{
"epoch": 10.23728813559322,
"grad_norm": 2.866830348968506,
"learning_rate": 2.5052926663577e-06,
"loss": 0.6203,
"num_input_tokens_seen": 1496920,
"step": 151
},
{
"epoch": 10.305084745762711,
"grad_norm": 2.5276501178741455,
"learning_rate": 2.221359710692961e-06,
"loss": 0.6025,
"num_input_tokens_seen": 1507144,
"step": 152
},
{
"epoch": 10.372881355932204,
"grad_norm": 3.2329604625701904,
"learning_rate": 1.9541339027450256e-06,
"loss": 0.5943,
"num_input_tokens_seen": 1515560,
"step": 153
},
{
"epoch": 10.440677966101696,
"grad_norm": 2.7045376300811768,
"learning_rate": 1.70370868554659e-06,
"loss": 0.5806,
"num_input_tokens_seen": 1526296,
"step": 154
},
{
"epoch": 10.508474576271187,
"grad_norm": 2.964407444000244,
"learning_rate": 1.4701716273304521e-06,
"loss": 0.7033,
"num_input_tokens_seen": 1537064,
"step": 155
},
{
"epoch": 10.576271186440678,
"grad_norm": 2.5632989406585693,
"learning_rate": 1.2536043909088191e-06,
"loss": 0.629,
"num_input_tokens_seen": 1547816,
"step": 156
},
{
"epoch": 10.64406779661017,
"grad_norm": 2.630671739578247,
"learning_rate": 1.0540827051175818e-06,
"loss": 0.6927,
"num_input_tokens_seen": 1557672,
"step": 157
},
{
"epoch": 10.711864406779661,
"grad_norm": 2.9026784896850586,
"learning_rate": 8.716763383355864e-07,
"loss": 0.4882,
"num_input_tokens_seen": 1568264,
"step": 158
},
{
"epoch": 10.779661016949152,
"grad_norm": 2.553671360015869,
"learning_rate": 7.064490740882057e-07,
"loss": 0.543,
"num_input_tokens_seen": 1578744,
"step": 159
},
{
"epoch": 10.847457627118644,
"grad_norm": 2.838085412979126,
"learning_rate": 5.584586887435739e-07,
"loss": 0.3713,
"num_input_tokens_seen": 1588008,
"step": 160
},
{
"epoch": 10.915254237288135,
"grad_norm": 2.673503875732422,
"learning_rate": 4.277569313094809e-07,
"loss": 0.5766,
"num_input_tokens_seen": 1596200,
"step": 161
},
{
"epoch": 10.983050847457626,
"grad_norm": 2.511265516281128,
"learning_rate": 3.143895053378698e-07,
"loss": 0.5419,
"num_input_tokens_seen": 1606760,
"step": 162
},
{
"epoch": 11.05084745762712,
"grad_norm": 3.0119810104370117,
"learning_rate": 2.1839605294330933e-07,
"loss": 0.5242,
"num_input_tokens_seen": 1614864,
"step": 163
},
{
"epoch": 11.11864406779661,
"grad_norm": 3.153884172439575,
"learning_rate": 1.3981014094099353e-07,
"loss": 0.4455,
"num_input_tokens_seen": 1624048,
"step": 164
},
{
"epoch": 11.186440677966102,
"grad_norm": 2.7290897369384766,
"learning_rate": 7.865924910916977e-08,
"loss": 0.6553,
"num_input_tokens_seen": 1633808,
"step": 165
},
{
"epoch": 11.254237288135593,
"grad_norm": 2.6486260890960693,
"learning_rate": 3.496476058006959e-08,
"loss": 0.4241,
"num_input_tokens_seen": 1644560,
"step": 166
},
{
"epoch": 11.322033898305085,
"grad_norm": 2.6112442016601562,
"learning_rate": 8.741954362678772e-09,
"loss": 0.7591,
"num_input_tokens_seen": 1655392,
"step": 167
},
{
"epoch": 11.389830508474576,
"grad_norm": 3.20123028755188,
"learning_rate": 0.0,
"loss": 0.5966,
"num_input_tokens_seen": 1664720,
"step": 168
},
{
"epoch": 11.389830508474576,
"num_input_tokens_seen": 1664720,
"step": 168,
"total_flos": 7.517105945247744e+16,
"train_loss": 1.1265570782124996,
"train_runtime": 1670.3142,
"train_samples_per_second": 0.841,
"train_steps_per_second": 0.101
}
],
"logging_steps": 1,
"max_steps": 168,
"num_input_tokens_seen": 1664720,
"num_train_epochs": 12,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.517105945247744e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}