{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 0.600925862789154,
"learning_rate": 4e-05,
"loss": 0.4307,
"step": 1
},
{
"epoch": 0.04,
"grad_norm": 0.6059685349464417,
"learning_rate": 8e-05,
"loss": 0.4353,
"step": 2
},
{
"epoch": 0.06,
"grad_norm": 0.5777328014373779,
"learning_rate": 0.00012,
"loss": 0.421,
"step": 3
},
{
"epoch": 0.08,
"grad_norm": 0.548288106918335,
"learning_rate": 0.00016,
"loss": 0.4029,
"step": 4
},
{
"epoch": 0.1,
"grad_norm": 0.4978249967098236,
"learning_rate": 0.0002,
"loss": 0.4102,
"step": 5
},
{
"epoch": 0.12,
"grad_norm": 0.5470420718193054,
"learning_rate": 0.00019994532573409262,
"loss": 0.4123,
"step": 6
},
{
"epoch": 0.14,
"grad_norm": 0.5333985686302185,
"learning_rate": 0.00019978136272187747,
"loss": 0.3966,
"step": 7
},
{
"epoch": 0.16,
"grad_norm": 0.4586232304573059,
"learning_rate": 0.00019950829025450114,
"loss": 0.377,
"step": 8
},
{
"epoch": 0.18,
"grad_norm": 0.49000784754753113,
"learning_rate": 0.00019912640693269752,
"loss": 0.3785,
"step": 9
},
{
"epoch": 0.2,
"grad_norm": 0.45979875326156616,
"learning_rate": 0.00019863613034027224,
"loss": 0.3957,
"step": 10
},
{
"epoch": 0.22,
"grad_norm": 0.4552418887615204,
"learning_rate": 0.00019803799658748094,
"loss": 0.3921,
"step": 11
},
{
"epoch": 0.24,
"grad_norm": 0.4663061201572418,
"learning_rate": 0.0001973326597248006,
"loss": 0.3896,
"step": 12
},
{
"epoch": 0.26,
"grad_norm": 0.4285649359226227,
"learning_rate": 0.00019652089102773488,
"loss": 0.349,
"step": 13
},
{
"epoch": 0.28,
"grad_norm": 0.45254892110824585,
"learning_rate": 0.00019560357815343577,
"loss": 0.3685,
"step": 14
},
{
"epoch": 0.3,
"grad_norm": 0.4699627459049225,
"learning_rate": 0.00019458172417006347,
"loss": 0.3765,
"step": 15
},
{
"epoch": 0.32,
"grad_norm": 0.4629937708377838,
"learning_rate": 0.0001934564464599461,
"loss": 0.3567,
"step": 16
},
{
"epoch": 0.34,
"grad_norm": 0.44340917468070984,
"learning_rate": 0.00019222897549773848,
"loss": 0.3742,
"step": 17
},
{
"epoch": 0.36,
"grad_norm": 0.48120179772377014,
"learning_rate": 0.00019090065350491626,
"loss": 0.3507,
"step": 18
},
{
"epoch": 0.38,
"grad_norm": 0.4626652002334595,
"learning_rate": 0.00018947293298207635,
"loss": 0.3637,
"step": 19
},
{
"epoch": 0.4,
"grad_norm": 0.4551631808280945,
"learning_rate": 0.0001879473751206489,
"loss": 0.3588,
"step": 20
},
{
"epoch": 0.42,
"grad_norm": 0.47978681325912476,
"learning_rate": 0.00018632564809575742,
"loss": 0.3631,
"step": 21
},
{
"epoch": 0.44,
"grad_norm": 0.44357830286026,
"learning_rate": 0.00018460952524209355,
"loss": 0.3634,
"step": 22
},
{
"epoch": 0.46,
"grad_norm": 0.4979308247566223,
"learning_rate": 0.00018280088311480201,
"loss": 0.3497,
"step": 23
},
{
"epoch": 0.48,
"grad_norm": 0.4785916805267334,
"learning_rate": 0.00018090169943749476,
"loss": 0.3506,
"step": 24
},
{
"epoch": 0.5,
"grad_norm": 0.45637139678001404,
"learning_rate": 0.00017891405093963938,
"loss": 0.3621,
"step": 25
},
{
"epoch": 0.52,
"grad_norm": 0.4222208261489868,
"learning_rate": 0.00017684011108568592,
"loss": 0.333,
"step": 26
},
{
"epoch": 0.54,
"grad_norm": 0.4473680555820465,
"learning_rate": 0.0001746821476984154,
"loss": 0.3384,
"step": 27
},
{
"epoch": 0.56,
"grad_norm": 0.45950424671173096,
"learning_rate": 0.00017244252047910892,
"loss": 0.3396,
"step": 28
},
{
"epoch": 0.58,
"grad_norm": 0.4503507614135742,
"learning_rate": 0.00017012367842724887,
"loss": 0.3907,
"step": 29
},
{
"epoch": 0.6,
"grad_norm": 0.4551246166229248,
"learning_rate": 0.00016772815716257412,
"loss": 0.3413,
"step": 30
},
{
"epoch": 0.62,
"grad_norm": 0.42266467213630676,
"learning_rate": 0.00016525857615241687,
"loss": 0.3402,
"step": 31
},
{
"epoch": 0.64,
"grad_norm": 0.473186194896698,
"learning_rate": 0.0001627176358473537,
"loss": 0.3515,
"step": 32
},
{
"epoch": 0.66,
"grad_norm": 0.42668187618255615,
"learning_rate": 0.00016010811472830252,
"loss": 0.3148,
"step": 33
},
{
"epoch": 0.68,
"grad_norm": 0.424800306558609,
"learning_rate": 0.00015743286626829437,
"loss": 0.3224,
"step": 34
},
{
"epoch": 0.7,
"grad_norm": 0.43106356263160706,
"learning_rate": 0.00015469481581224272,
"loss": 0.3143,
"step": 35
},
{
"epoch": 0.72,
"grad_norm": 0.4076085090637207,
"learning_rate": 0.00015189695737812152,
"loss": 0.3329,
"step": 36
},
{
"epoch": 0.74,
"grad_norm": 0.4057788550853729,
"learning_rate": 0.00014904235038305083,
"loss": 0.3389,
"step": 37
},
{
"epoch": 0.76,
"grad_norm": 0.4162575900554657,
"learning_rate": 0.0001461341162978688,
"loss": 0.3268,
"step": 38
},
{
"epoch": 0.78,
"grad_norm": 0.42211902141571045,
"learning_rate": 0.00014317543523384928,
"loss": 0.2971,
"step": 39
},
{
"epoch": 0.8,
"grad_norm": 0.42816001176834106,
"learning_rate": 0.00014016954246529696,
"loss": 0.3303,
"step": 40
},
{
"epoch": 0.82,
"grad_norm": 0.4197749197483063,
"learning_rate": 0.00013711972489182208,
"loss": 0.3443,
"step": 41
},
{
"epoch": 0.84,
"grad_norm": 0.41594424843788147,
"learning_rate": 0.00013402931744416433,
"loss": 0.3417,
"step": 42
},
{
"epoch": 0.86,
"grad_norm": 0.41279336810112,
"learning_rate": 0.00013090169943749476,
"loss": 0.3252,
"step": 43
},
{
"epoch": 0.88,
"grad_norm": 0.42070260643959045,
"learning_rate": 0.00012774029087618446,
"loss": 0.3107,
"step": 44
},
{
"epoch": 0.9,
"grad_norm": 0.4160784184932709,
"learning_rate": 0.00012454854871407994,
"loss": 0.3253,
"step": 45
},
{
"epoch": 0.92,
"grad_norm": 0.4144185483455658,
"learning_rate": 0.0001213299630743747,
"loss": 0.3251,
"step": 46
},
{
"epoch": 0.94,
"grad_norm": 0.3940325081348419,
"learning_rate": 0.000118088053433211,
"loss": 0.3234,
"step": 47
},
{
"epoch": 0.96,
"grad_norm": 0.40852218866348267,
"learning_rate": 0.0001148263647711842,
"loss": 0.3405,
"step": 48
},
{
"epoch": 0.98,
"grad_norm": 0.4145854711532593,
"learning_rate": 0.00011154846369695863,
"loss": 0.3237,
"step": 49
},
{
"epoch": 1.0,
"grad_norm": 0.40923136472702026,
"learning_rate": 0.00010825793454723325,
"loss": 0.3401,
"step": 50
},
{
"epoch": 1.02,
"grad_norm": 0.33250558376312256,
"learning_rate": 0.00010495837546732224,
"loss": 0.2383,
"step": 51
},
{
"epoch": 1.04,
"grad_norm": 0.3187699019908905,
"learning_rate": 0.00010165339447663587,
"loss": 0.2129,
"step": 52
},
{
"epoch": 1.06,
"grad_norm": 0.3277425467967987,
"learning_rate": 9.834660552336415e-05,
"loss": 0.207,
"step": 53
},
{
"epoch": 1.08,
"grad_norm": 0.33040741086006165,
"learning_rate": 9.504162453267777e-05,
"loss": 0.1989,
"step": 54
},
{
"epoch": 1.1,
"grad_norm": 0.33165445923805237,
"learning_rate": 9.174206545276677e-05,
"loss": 0.224,
"step": 55
},
{
"epoch": 1.12,
"grad_norm": 0.3344186842441559,
"learning_rate": 8.845153630304139e-05,
"loss": 0.2011,
"step": 56
},
{
"epoch": 1.1400000000000001,
"grad_norm": 0.3419267535209656,
"learning_rate": 8.517363522881579e-05,
"loss": 0.2195,
"step": 57
},
{
"epoch": 1.16,
"grad_norm": 0.3256666362285614,
"learning_rate": 8.191194656678904e-05,
"loss": 0.1901,
"step": 58
},
{
"epoch": 1.18,
"grad_norm": 0.3259730935096741,
"learning_rate": 7.867003692562534e-05,
"loss": 0.2043,
"step": 59
},
{
"epoch": 1.2,
"grad_norm": 0.3260168433189392,
"learning_rate": 7.54514512859201e-05,
"loss": 0.2002,
"step": 60
},
{
"epoch": 1.22,
"grad_norm": 0.34458276629447937,
"learning_rate": 7.225970912381556e-05,
"loss": 0.2031,
"step": 61
},
{
"epoch": 1.24,
"grad_norm": 0.3367977440357208,
"learning_rate": 6.909830056250527e-05,
"loss": 0.2018,
"step": 62
},
{
"epoch": 1.26,
"grad_norm": 0.36229151487350464,
"learning_rate": 6.59706825558357e-05,
"loss": 0.1987,
"step": 63
},
{
"epoch": 1.28,
"grad_norm": 0.34255215525627136,
"learning_rate": 6.28802751081779e-05,
"loss": 0.1906,
"step": 64
},
{
"epoch": 1.3,
"grad_norm": 0.3520965576171875,
"learning_rate": 5.983045753470308e-05,
"loss": 0.1916,
"step": 65
},
{
"epoch": 1.32,
"grad_norm": 0.3631666600704193,
"learning_rate": 5.6824564766150726e-05,
"loss": 0.2055,
"step": 66
},
{
"epoch": 1.34,
"grad_norm": 0.3751111626625061,
"learning_rate": 5.386588370213124e-05,
"loss": 0.1894,
"step": 67
},
{
"epoch": 1.3599999999999999,
"grad_norm": 0.3539924919605255,
"learning_rate": 5.095764961694922e-05,
"loss": 0.1949,
"step": 68
},
{
"epoch": 1.38,
"grad_norm": 0.35575294494628906,
"learning_rate": 4.810304262187852e-05,
"loss": 0.1947,
"step": 69
},
{
"epoch": 1.4,
"grad_norm": 0.3652094602584839,
"learning_rate": 4.530518418775733e-05,
"loss": 0.1937,
"step": 70
},
{
"epoch": 1.42,
"grad_norm": 0.3642243444919586,
"learning_rate": 4.256713373170564e-05,
"loss": 0.2066,
"step": 71
},
{
"epoch": 1.44,
"grad_norm": 0.37013036012649536,
"learning_rate": 3.9891885271697496e-05,
"loss": 0.2062,
"step": 72
},
{
"epoch": 1.46,
"grad_norm": 0.35074442625045776,
"learning_rate": 3.7282364152646297e-05,
"loss": 0.2015,
"step": 73
},
{
"epoch": 1.48,
"grad_norm": 0.35445520281791687,
"learning_rate": 3.4741423847583134e-05,
"loss": 0.1934,
"step": 74
},
{
"epoch": 1.5,
"grad_norm": 0.3767736852169037,
"learning_rate": 3.227184283742591e-05,
"loss": 0.1955,
"step": 75
},
{
"epoch": 1.52,
"grad_norm": 0.3581729829311371,
"learning_rate": 2.9876321572751144e-05,
"loss": 0.1882,
"step": 76
},
{
"epoch": 1.54,
"grad_norm": 0.362015038728714,
"learning_rate": 2.7557479520891104e-05,
"loss": 0.2056,
"step": 77
},
{
"epoch": 1.56,
"grad_norm": 0.35143327713012695,
"learning_rate": 2.5317852301584643e-05,
"loss": 0.1957,
"step": 78
},
{
"epoch": 1.58,
"grad_norm": 0.3580865263938904,
"learning_rate": 2.315988891431412e-05,
"loss": 0.197,
"step": 79
},
{
"epoch": 1.6,
"grad_norm": 0.3541586399078369,
"learning_rate": 2.1085949060360654e-05,
"loss": 0.2022,
"step": 80
},
{
"epoch": 1.62,
"grad_norm": 0.36059048771858215,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.1855,
"step": 81
},
{
"epoch": 1.6400000000000001,
"grad_norm": 0.3643834888935089,
"learning_rate": 1.7199116885197995e-05,
"loss": 0.1812,
"step": 82
},
{
"epoch": 1.6600000000000001,
"grad_norm": 0.3523213863372803,
"learning_rate": 1.5390474757906446e-05,
"loss": 0.198,
"step": 83
},
{
"epoch": 1.6800000000000002,
"grad_norm": 0.3479655981063843,
"learning_rate": 1.3674351904242611e-05,
"loss": 0.1913,
"step": 84
},
{
"epoch": 1.7,
"grad_norm": 0.3516225218772888,
"learning_rate": 1.2052624879351104e-05,
"loss": 0.1936,
"step": 85
},
{
"epoch": 1.72,
"grad_norm": 0.3603803217411041,
"learning_rate": 1.0527067017923654e-05,
"loss": 0.182,
"step": 86
},
{
"epoch": 1.74,
"grad_norm": 0.3666897416114807,
"learning_rate": 9.09934649508375e-06,
"loss": 0.198,
"step": 87
},
{
"epoch": 1.76,
"grad_norm": 0.347070574760437,
"learning_rate": 7.771024502261526e-06,
"loss": 0.1819,
"step": 88
},
{
"epoch": 1.78,
"grad_norm": 0.366550087928772,
"learning_rate": 6.543553540053926e-06,
"loss": 0.1958,
"step": 89
},
{
"epoch": 1.8,
"grad_norm": 0.3641619384288788,
"learning_rate": 5.418275829936537e-06,
"loss": 0.193,
"step": 90
},
{
"epoch": 1.8199999999999998,
"grad_norm": 0.3448559045791626,
"learning_rate": 4.3964218465642355e-06,
"loss": 0.183,
"step": 91
},
{
"epoch": 1.8399999999999999,
"grad_norm": 0.3684370517730713,
"learning_rate": 3.4791089722651436e-06,
"loss": 0.2036,
"step": 92
},
{
"epoch": 1.8599999999999999,
"grad_norm": 0.3479262590408325,
"learning_rate": 2.667340275199426e-06,
"loss": 0.1871,
"step": 93
},
{
"epoch": 1.88,
"grad_norm": 0.3530682325363159,
"learning_rate": 1.9620034125190644e-06,
"loss": 0.1975,
"step": 94
},
{
"epoch": 1.9,
"grad_norm": 0.3446263074874878,
"learning_rate": 1.3638696597277679e-06,
"loss": 0.1791,
"step": 95
},
{
"epoch": 1.92,
"grad_norm": 0.341688334941864,
"learning_rate": 8.735930673024806e-07,
"loss": 0.1954,
"step": 96
},
{
"epoch": 1.94,
"grad_norm": 0.3435825705528259,
"learning_rate": 4.917097454988584e-07,
"loss": 0.1872,
"step": 97
},
{
"epoch": 1.96,
"grad_norm": 0.35581129789352417,
"learning_rate": 2.1863727812254653e-07,
"loss": 0.1927,
"step": 98
},
{
"epoch": 1.98,
"grad_norm": 0.362829327583313,
"learning_rate": 5.467426590739511e-08,
"loss": 0.1888,
"step": 99
},
{
"epoch": 2.0,
"grad_norm": 0.35802093148231506,
"learning_rate": 0.0,
"loss": 0.1933,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.504376951971676e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}