relianceV2 / trainer_state.json
{
"best_metric": 0.011823242530226707,
"best_model_checkpoint": "/home/paperspace/Data/models/relianceV2/llm3br256/checkpoint-100",
"epoch": 4.921212121212121,
"eval_steps": 5,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.048484848484848485,
"grad_norm": 0.13376639783382416,
"learning_rate": 1e-05,
"loss": 0.063,
"step": 1
},
{
"epoch": 0.09696969696969697,
"grad_norm": 0.12203846871852875,
"learning_rate": 2e-05,
"loss": 0.0607,
"step": 2
},
{
"epoch": 0.14545454545454545,
"grad_norm": 0.13988564908504486,
"learning_rate": 3e-05,
"loss": 0.0615,
"step": 3
},
{
"epoch": 0.19393939393939394,
"grad_norm": 0.10985743254423141,
"learning_rate": 4e-05,
"loss": 0.0504,
"step": 4
},
{
"epoch": 0.24242424242424243,
"grad_norm": 0.07405027747154236,
"learning_rate": 5e-05,
"loss": 0.0395,
"step": 5
},
{
"epoch": 0.24242424242424243,
"eval_loss": 0.04326783865690231,
"eval_runtime": 11.3564,
"eval_samples_per_second": 4.403,
"eval_steps_per_second": 1.145,
"step": 5
},
{
"epoch": 0.2909090909090909,
"grad_norm": 0.09203900396823883,
"learning_rate": 6e-05,
"loss": 0.0419,
"step": 6
},
{
"epoch": 0.3393939393939394,
"grad_norm": 0.07265866547822952,
"learning_rate": 7e-05,
"loss": 0.0381,
"step": 7
},
{
"epoch": 0.3878787878787879,
"grad_norm": 0.04461047425866127,
"learning_rate": 8e-05,
"loss": 0.0341,
"step": 8
},
{
"epoch": 0.43636363636363634,
"grad_norm": 0.04746266454458237,
"learning_rate": 9e-05,
"loss": 0.0345,
"step": 9
},
{
"epoch": 0.48484848484848486,
"grad_norm": 0.040007542818784714,
"learning_rate": 0.0001,
"loss": 0.0324,
"step": 10
},
{
"epoch": 0.48484848484848486,
"eval_loss": 0.03000650927424431,
"eval_runtime": 10.3489,
"eval_samples_per_second": 4.831,
"eval_steps_per_second": 1.256,
"step": 10
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.03799016401171684,
"learning_rate": 9.99695413509548e-05,
"loss": 0.0331,
"step": 11
},
{
"epoch": 0.5818181818181818,
"grad_norm": 0.03196106478571892,
"learning_rate": 9.987820251299122e-05,
"loss": 0.029,
"step": 12
},
{
"epoch": 0.6303030303030303,
"grad_norm": 0.027229588478803635,
"learning_rate": 9.972609476841367e-05,
"loss": 0.0257,
"step": 13
},
{
"epoch": 0.6787878787878788,
"grad_norm": 0.02768268622457981,
"learning_rate": 9.951340343707852e-05,
"loss": 0.0215,
"step": 14
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.032983552664518356,
"learning_rate": 9.924038765061042e-05,
"loss": 0.024,
"step": 15
},
{
"epoch": 0.7272727272727273,
"eval_loss": 0.024439169093966484,
"eval_runtime": 10.352,
"eval_samples_per_second": 4.83,
"eval_steps_per_second": 1.256,
"step": 15
},
{
"epoch": 0.7757575757575758,
"grad_norm": 0.03327898308634758,
"learning_rate": 9.890738003669029e-05,
"loss": 0.0321,
"step": 16
},
{
"epoch": 0.8242424242424242,
"grad_norm": 0.02506704442203045,
"learning_rate": 9.851478631379982e-05,
"loss": 0.0281,
"step": 17
},
{
"epoch": 0.8727272727272727,
"grad_norm": 0.023538140580058098,
"learning_rate": 9.806308479691595e-05,
"loss": 0.0202,
"step": 18
},
{
"epoch": 0.9212121212121213,
"grad_norm": 0.02581767924129963,
"learning_rate": 9.755282581475769e-05,
"loss": 0.0206,
"step": 19
},
{
"epoch": 0.9696969696969697,
"grad_norm": 0.021269701421260834,
"learning_rate": 9.698463103929542e-05,
"loss": 0.0189,
"step": 20
},
{
"epoch": 0.9696969696969697,
"eval_loss": 0.0212231557816267,
"eval_runtime": 10.4147,
"eval_samples_per_second": 4.801,
"eval_steps_per_second": 1.248,
"step": 20
},
{
"epoch": 1.0363636363636364,
"grad_norm": 0.037210095673799515,
"learning_rate": 9.635919272833938e-05,
"loss": 0.0317,
"step": 21
},
{
"epoch": 1.084848484848485,
"grad_norm": 0.02000698819756508,
"learning_rate": 9.567727288213005e-05,
"loss": 0.019,
"step": 22
},
{
"epoch": 1.1333333333333333,
"grad_norm": 0.020116321742534637,
"learning_rate": 9.493970231495835e-05,
"loss": 0.019,
"step": 23
},
{
"epoch": 1.1818181818181819,
"grad_norm": 0.022886989638209343,
"learning_rate": 9.414737964294636e-05,
"loss": 0.0181,
"step": 24
},
{
"epoch": 1.2303030303030302,
"grad_norm": 0.023031054064631462,
"learning_rate": 9.330127018922194e-05,
"loss": 0.0171,
"step": 25
},
{
"epoch": 1.2303030303030302,
"eval_loss": 0.019027845934033394,
"eval_runtime": 10.3362,
"eval_samples_per_second": 4.837,
"eval_steps_per_second": 1.258,
"step": 25
},
{
"epoch": 1.2787878787878788,
"grad_norm": 0.01873927377164364,
"learning_rate": 9.24024048078213e-05,
"loss": 0.0182,
"step": 26
},
{
"epoch": 1.3272727272727272,
"grad_norm": 0.018605511635541916,
"learning_rate": 9.145187862775209e-05,
"loss": 0.0163,
"step": 27
},
{
"epoch": 1.3757575757575757,
"grad_norm": 0.023363754153251648,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0177,
"step": 28
},
{
"epoch": 1.4242424242424243,
"grad_norm": 0.017733635380864143,
"learning_rate": 8.940053768033609e-05,
"loss": 0.014,
"step": 29
},
{
"epoch": 1.4727272727272727,
"grad_norm": 0.0225802194327116,
"learning_rate": 8.83022221559489e-05,
"loss": 0.0146,
"step": 30
},
{
"epoch": 1.4727272727272727,
"eval_loss": 0.01733844168484211,
"eval_runtime": 10.3386,
"eval_samples_per_second": 4.836,
"eval_steps_per_second": 1.257,
"step": 30
},
{
"epoch": 1.5212121212121212,
"grad_norm": 0.018234577029943466,
"learning_rate": 8.715724127386972e-05,
"loss": 0.017,
"step": 31
},
{
"epoch": 1.5696969696969698,
"grad_norm": 0.024176109582185745,
"learning_rate": 8.596699001693255e-05,
"loss": 0.0155,
"step": 32
},
{
"epoch": 1.6181818181818182,
"grad_norm": 0.023358961567282677,
"learning_rate": 8.473291852294987e-05,
"loss": 0.0157,
"step": 33
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.025254715234041214,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0167,
"step": 34
},
{
"epoch": 1.7151515151515153,
"grad_norm": 0.021989861503243446,
"learning_rate": 8.213938048432697e-05,
"loss": 0.0144,
"step": 35
},
{
"epoch": 1.7151515151515153,
"eval_loss": 0.016134196892380714,
"eval_runtime": 10.3976,
"eval_samples_per_second": 4.809,
"eval_steps_per_second": 1.25,
"step": 35
},
{
"epoch": 1.7636363636363637,
"grad_norm": 0.018357617780566216,
"learning_rate": 8.07830737662829e-05,
"loss": 0.0162,
"step": 36
},
{
"epoch": 1.812121212121212,
"grad_norm": 0.021665558218955994,
"learning_rate": 7.938926261462366e-05,
"loss": 0.0194,
"step": 37
},
{
"epoch": 1.8606060606060606,
"grad_norm": 0.020184990018606186,
"learning_rate": 7.795964517353735e-05,
"loss": 0.0154,
"step": 38
},
{
"epoch": 1.9090909090909092,
"grad_norm": 0.016307702288031578,
"learning_rate": 7.649596321166024e-05,
"loss": 0.0134,
"step": 39
},
{
"epoch": 1.9575757575757575,
"grad_norm": 0.01873622089624405,
"learning_rate": 7.500000000000001e-05,
"loss": 0.0104,
"step": 40
},
{
"epoch": 1.9575757575757575,
"eval_loss": 0.015456685796380043,
"eval_runtime": 10.3404,
"eval_samples_per_second": 4.835,
"eval_steps_per_second": 1.257,
"step": 40
},
{
"epoch": 2.0242424242424244,
"grad_norm": 0.03928119316697121,
"learning_rate": 7.347357813929454e-05,
"loss": 0.0277,
"step": 41
},
{
"epoch": 2.0727272727272728,
"grad_norm": 0.01585806906223297,
"learning_rate": 7.191855733945387e-05,
"loss": 0.012,
"step": 42
},
{
"epoch": 2.121212121212121,
"grad_norm": 0.022943127900362015,
"learning_rate": 7.033683215379002e-05,
"loss": 0.0147,
"step": 43
},
{
"epoch": 2.16969696969697,
"grad_norm": 0.022961463779211044,
"learning_rate": 6.873032967079561e-05,
"loss": 0.014,
"step": 44
},
{
"epoch": 2.2181818181818183,
"grad_norm": 0.017532989382743835,
"learning_rate": 6.710100716628344e-05,
"loss": 0.0143,
"step": 45
},
{
"epoch": 2.2181818181818183,
"eval_loss": 0.01516613457351923,
"eval_runtime": 10.3353,
"eval_samples_per_second": 4.838,
"eval_steps_per_second": 1.258,
"step": 45
},
{
"epoch": 2.2666666666666666,
"grad_norm": 0.026267599314451218,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0126,
"step": 46
},
{
"epoch": 2.315151515151515,
"grad_norm": 0.024150898680090904,
"learning_rate": 6.378186779084995e-05,
"loss": 0.0118,
"step": 47
},
{
"epoch": 2.3636363636363638,
"grad_norm": 0.019730910658836365,
"learning_rate": 6.209609477998338e-05,
"loss": 0.0121,
"step": 48
},
{
"epoch": 2.412121212121212,
"grad_norm": 0.02562836930155754,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.0096,
"step": 49
},
{
"epoch": 2.4606060606060605,
"grad_norm": 0.01951598934829235,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0117,
"step": 50
},
{
"epoch": 2.4606060606060605,
"eval_loss": 0.014072530902922153,
"eval_runtime": 10.3868,
"eval_samples_per_second": 4.814,
"eval_steps_per_second": 1.252,
"step": 50
},
{
"epoch": 2.509090909090909,
"grad_norm": 0.02022629976272583,
"learning_rate": 5.695865504800327e-05,
"loss": 0.0139,
"step": 51
},
{
"epoch": 2.5575757575757576,
"grad_norm": 0.01750958524644375,
"learning_rate": 5.522642316338268e-05,
"loss": 0.0099,
"step": 52
},
{
"epoch": 2.606060606060606,
"grad_norm": 0.0212919432669878,
"learning_rate": 5.348782368720626e-05,
"loss": 0.013,
"step": 53
},
{
"epoch": 2.6545454545454543,
"grad_norm": 0.016065998002886772,
"learning_rate": 5.174497483512506e-05,
"loss": 0.0093,
"step": 54
},
{
"epoch": 2.703030303030303,
"grad_norm": 0.018324071541428566,
"learning_rate": 5e-05,
"loss": 0.015,
"step": 55
},
{
"epoch": 2.703030303030303,
"eval_loss": 0.013632840476930141,
"eval_runtime": 10.3292,
"eval_samples_per_second": 4.841,
"eval_steps_per_second": 1.259,
"step": 55
},
{
"epoch": 2.7515151515151515,
"grad_norm": 0.02191258780658245,
"learning_rate": 4.825502516487497e-05,
"loss": 0.0109,
"step": 56
},
{
"epoch": 2.8,
"grad_norm": 0.01748000457882881,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.0104,
"step": 57
},
{
"epoch": 2.8484848484848486,
"grad_norm": 0.01487227063626051,
"learning_rate": 4.477357683661734e-05,
"loss": 0.0097,
"step": 58
},
{
"epoch": 2.896969696969697,
"grad_norm": 0.01992475800216198,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.0108,
"step": 59
},
{
"epoch": 2.9454545454545453,
"grad_norm": 0.01630261354148388,
"learning_rate": 4.131759111665349e-05,
"loss": 0.0092,
"step": 60
},
{
"epoch": 2.9454545454545453,
"eval_loss": 0.013053460977971554,
"eval_runtime": 10.3658,
"eval_samples_per_second": 4.824,
"eval_steps_per_second": 1.254,
"step": 60
},
{
"epoch": 3.012121212121212,
"grad_norm": 0.03835081681609154,
"learning_rate": 3.960441545911204e-05,
"loss": 0.0172,
"step": 61
},
{
"epoch": 3.0606060606060606,
"grad_norm": 0.017339400947093964,
"learning_rate": 3.790390522001662e-05,
"loss": 0.008,
"step": 62
},
{
"epoch": 3.109090909090909,
"grad_norm": 0.01646747626364231,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.0121,
"step": 63
},
{
"epoch": 3.1575757575757577,
"grad_norm": 0.01738414168357849,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.008,
"step": 64
},
{
"epoch": 3.206060606060606,
"grad_norm": 0.022966962307691574,
"learning_rate": 3.289899283371657e-05,
"loss": 0.008,
"step": 65
},
{
"epoch": 3.206060606060606,
"eval_loss": 0.012713730335235596,
"eval_runtime": 10.3449,
"eval_samples_per_second": 4.833,
"eval_steps_per_second": 1.257,
"step": 65
},
{
"epoch": 3.2545454545454544,
"grad_norm": 0.01976313628256321,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0096,
"step": 66
},
{
"epoch": 3.303030303030303,
"grad_norm": 0.015671931207180023,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.0071,
"step": 67
},
{
"epoch": 3.3515151515151516,
"grad_norm": 0.02184975892305374,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.0092,
"step": 68
},
{
"epoch": 3.4,
"grad_norm": 0.021975932642817497,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.009,
"step": 69
},
{
"epoch": 3.4484848484848483,
"grad_norm": 0.02743859961628914,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0109,
"step": 70
},
{
"epoch": 3.4484848484848483,
"eval_loss": 0.012468294240534306,
"eval_runtime": 10.3407,
"eval_samples_per_second": 4.835,
"eval_steps_per_second": 1.257,
"step": 70
},
{
"epoch": 3.496969696969697,
"grad_norm": 0.02249360829591751,
"learning_rate": 2.350403678833976e-05,
"loss": 0.0123,
"step": 71
},
{
"epoch": 3.5454545454545454,
"grad_norm": 0.019340503960847855,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.0072,
"step": 72
},
{
"epoch": 3.5939393939393938,
"grad_norm": 0.017731212079524994,
"learning_rate": 2.061073738537635e-05,
"loss": 0.009,
"step": 73
},
{
"epoch": 3.6424242424242426,
"grad_norm": 0.017661720514297485,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0078,
"step": 74
},
{
"epoch": 3.690909090909091,
"grad_norm": 0.01668357476592064,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.0085,
"step": 75
},
{
"epoch": 3.690909090909091,
"eval_loss": 0.012197648175060749,
"eval_runtime": 10.3397,
"eval_samples_per_second": 4.836,
"eval_steps_per_second": 1.257,
"step": 75
},
{
"epoch": 3.7393939393939393,
"grad_norm": 0.019251663237810135,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.0089,
"step": 76
},
{
"epoch": 3.787878787878788,
"grad_norm": 0.018358884379267693,
"learning_rate": 1.526708147705013e-05,
"loss": 0.0127,
"step": 77
},
{
"epoch": 3.8363636363636364,
"grad_norm": 0.018658000975847244,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.0085,
"step": 78
},
{
"epoch": 3.8848484848484848,
"grad_norm": 0.01768352836370468,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.0079,
"step": 79
},
{
"epoch": 3.9333333333333336,
"grad_norm": 0.017675764858722687,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0089,
"step": 80
},
{
"epoch": 3.9333333333333336,
"eval_loss": 0.011956109665334225,
"eval_runtime": 10.3374,
"eval_samples_per_second": 4.837,
"eval_steps_per_second": 1.258,
"step": 80
},
{
"epoch": 3.981818181818182,
"grad_norm": 0.04371407628059387,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.013,
"step": 81
},
{
"epoch": 4.048484848484849,
"grad_norm": 0.01942707970738411,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0092,
"step": 82
},
{
"epoch": 4.096969696969697,
"grad_norm": 0.014996436424553394,
"learning_rate": 8.548121372247918e-06,
"loss": 0.0064,
"step": 83
},
{
"epoch": 4.1454545454545455,
"grad_norm": 0.01869269274175167,
"learning_rate": 7.597595192178702e-06,
"loss": 0.0086,
"step": 84
},
{
"epoch": 4.193939393939394,
"grad_norm": 0.017618168145418167,
"learning_rate": 6.698729810778065e-06,
"loss": 0.0074,
"step": 85
},
{
"epoch": 4.193939393939394,
"eval_loss": 0.011830544099211693,
"eval_runtime": 10.3339,
"eval_samples_per_second": 4.838,
"eval_steps_per_second": 1.258,
"step": 85
},
{
"epoch": 4.242424242424242,
"grad_norm": 0.016551963984966278,
"learning_rate": 5.852620357053651e-06,
"loss": 0.009,
"step": 86
},
{
"epoch": 4.290909090909091,
"grad_norm": 0.01904563419520855,
"learning_rate": 5.060297685041659e-06,
"loss": 0.0074,
"step": 87
},
{
"epoch": 4.33939393939394,
"grad_norm": 0.02098240703344345,
"learning_rate": 4.322727117869951e-06,
"loss": 0.0094,
"step": 88
},
{
"epoch": 4.387878787878788,
"grad_norm": 0.015075408853590488,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.0084,
"step": 89
},
{
"epoch": 4.4363636363636365,
"grad_norm": 0.016622379422187805,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.0074,
"step": 90
},
{
"epoch": 4.4363636363636365,
"eval_loss": 0.011814383789896965,
"eval_runtime": 10.356,
"eval_samples_per_second": 4.828,
"eval_steps_per_second": 1.255,
"step": 90
},
{
"epoch": 4.484848484848484,
"grad_norm": 0.017493437975645065,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.0087,
"step": 91
},
{
"epoch": 4.533333333333333,
"grad_norm": 0.01581619866192341,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.0073,
"step": 92
},
{
"epoch": 4.581818181818182,
"grad_norm": 0.020205620676279068,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.0075,
"step": 93
},
{
"epoch": 4.63030303030303,
"grad_norm": 0.016090713441371918,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.0071,
"step": 94
},
{
"epoch": 4.678787878787879,
"grad_norm": 0.014329814352095127,
"learning_rate": 7.596123493895991e-07,
"loss": 0.0066,
"step": 95
},
{
"epoch": 4.678787878787879,
"eval_loss": 0.011838787235319614,
"eval_runtime": 10.3353,
"eval_samples_per_second": 4.838,
"eval_steps_per_second": 1.258,
"step": 95
},
{
"epoch": 4.7272727272727275,
"grad_norm": 0.015449507161974907,
"learning_rate": 4.865965629214819e-07,
"loss": 0.0063,
"step": 96
},
{
"epoch": 4.775757575757575,
"grad_norm": 0.016315339133143425,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.0076,
"step": 97
},
{
"epoch": 4.824242424242424,
"grad_norm": 0.018415048718452454,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.0103,
"step": 98
},
{
"epoch": 4.872727272727273,
"grad_norm": 0.014639717526733875,
"learning_rate": 3.04586490452119e-08,
"loss": 0.0057,
"step": 99
},
{
"epoch": 4.921212121212121,
"grad_norm": 0.018388710916042328,
"learning_rate": 0.0,
"loss": 0.0065,
"step": 100
},
{
"epoch": 4.921212121212121,
"eval_loss": 0.011823242530226707,
"eval_runtime": 10.3329,
"eval_samples_per_second": 4.839,
"eval_steps_per_second": 1.258,
"step": 100
},
{
"epoch": 4.921212121212121,
"step": 100,
"total_flos": 2.442632501894185e+17,
"train_loss": 0.016519791511818767,
"train_runtime": 2009.2463,
"train_samples_per_second": 1.637,
"train_steps_per_second": 0.05
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.442632501894185e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
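
A minimal sketch (not part of the original file) of how one might inspect this trainer state with the Python standard library: entries in "log_history" that carry "loss" are per-step training records, while those with "eval_loss" are the evaluations run every "eval_steps" steps. The local path below is an assumption; point it at your own copy of the file.

import json

# Hypothetical local path to the file shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Split the log into training-step records and evaluation records.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.6f} at {state['best_model_checkpoint']}")
for e in eval_log:
    print(f"step {e['step']:>3}  epoch {e['epoch']:.2f}  eval_loss {e['eval_loss']:.5f}")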