{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 125,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"grad_norm": 1.570319414138794,
"learning_rate": 2.857142857142857e-05,
"loss": 0.9956,
"step": 1
},
{
"epoch": 0.08,
"grad_norm": 1.5578975677490234,
"learning_rate": 5.714285714285714e-05,
"loss": 0.946,
"step": 2
},
{
"epoch": 0.12,
"grad_norm": 1.444091796875,
"learning_rate": 8.571428571428571e-05,
"loss": 0.9214,
"step": 3
},
{
"epoch": 0.16,
"grad_norm": 1.2263134717941284,
"learning_rate": 0.00011428571428571428,
"loss": 0.8926,
"step": 4
},
{
"epoch": 0.2,
"grad_norm": 0.912525475025177,
"learning_rate": 0.00014285714285714287,
"loss": 0.8399,
"step": 5
},
{
"epoch": 0.24,
"grad_norm": 0.7677531242370605,
"learning_rate": 0.00017142857142857143,
"loss": 0.7987,
"step": 6
},
{
"epoch": 0.28,
"grad_norm": 0.6140649914741516,
"learning_rate": 0.0002,
"loss": 0.7481,
"step": 7
},
{
"epoch": 0.32,
"grad_norm": 0.8730568885803223,
"learning_rate": 0.00019996456111234527,
"loss": 0.7544,
"step": 8
},
{
"epoch": 0.36,
"grad_norm": 0.7831456661224365,
"learning_rate": 0.0001998582695676762,
"loss": 0.7009,
"step": 9
},
{
"epoch": 0.4,
"grad_norm": 0.512580394744873,
"learning_rate": 0.000199681200703075,
"loss": 0.6704,
"step": 10
},
{
"epoch": 0.44,
"grad_norm": 0.37298211455345154,
"learning_rate": 0.00019943348002101371,
"loss": 0.7061,
"step": 11
},
{
"epoch": 0.48,
"grad_norm": 0.3507251739501953,
"learning_rate": 0.00019911528310040074,
"loss": 0.6529,
"step": 12
},
{
"epoch": 0.52,
"grad_norm": 0.3471398651599884,
"learning_rate": 0.00019872683547213446,
"loss": 0.6738,
"step": 13
},
{
"epoch": 0.56,
"grad_norm": 0.33570075035095215,
"learning_rate": 0.00019826841245925212,
"loss": 0.6441,
"step": 14
},
{
"epoch": 0.6,
"grad_norm": 0.31052011251449585,
"learning_rate": 0.00019774033898178667,
"loss": 0.6061,
"step": 15
},
{
"epoch": 0.64,
"grad_norm": 0.3227557837963104,
"learning_rate": 0.00019714298932647098,
"loss": 0.6389,
"step": 16
},
{
"epoch": 0.68,
"grad_norm": 0.32108747959136963,
"learning_rate": 0.0001964767868814516,
"loss": 0.6036,
"step": 17
},
{
"epoch": 0.72,
"grad_norm": 0.31963539123535156,
"learning_rate": 0.00019574220383620055,
"loss": 0.6126,
"step": 18
},
{
"epoch": 0.76,
"grad_norm": 0.3213265538215637,
"learning_rate": 0.00019493976084683813,
"loss": 0.6219,
"step": 19
},
{
"epoch": 0.8,
"grad_norm": 0.32397329807281494,
"learning_rate": 0.00019407002666710336,
"loss": 0.6241,
"step": 20
},
{
"epoch": 0.84,
"grad_norm": 0.31370866298675537,
"learning_rate": 0.00019313361774523385,
"loss": 0.5877,
"step": 21
},
{
"epoch": 0.88,
"grad_norm": 0.3048774302005768,
"learning_rate": 0.00019213119778704128,
"loss": 0.5939,
"step": 22
},
{
"epoch": 0.92,
"grad_norm": 0.31822916865348816,
"learning_rate": 0.00019106347728549135,
"loss": 0.5762,
"step": 23
},
{
"epoch": 0.96,
"grad_norm": 0.309684693813324,
"learning_rate": 0.00018993121301712193,
"loss": 0.5746,
"step": 24
},
{
"epoch": 1.0,
"grad_norm": 0.3195765018463135,
"learning_rate": 0.00018873520750565718,
"loss": 0.5715,
"step": 25
},
{
"epoch": 1.04,
"grad_norm": 0.3123801052570343,
"learning_rate": 0.00018747630845319612,
"loss": 0.4747,
"step": 26
},
{
"epoch": 1.08,
"grad_norm": 0.31075745820999146,
"learning_rate": 0.0001861554081393806,
"loss": 0.4722,
"step": 27
},
{
"epoch": 1.12,
"grad_norm": 0.31425103545188904,
"learning_rate": 0.0001847734427889671,
"loss": 0.4374,
"step": 28
},
{
"epoch": 1.16,
"grad_norm": 0.28231778740882874,
"learning_rate": 0.0001833313919082515,
"loss": 0.4373,
"step": 29
},
{
"epoch": 1.2,
"grad_norm": 0.30015143752098083,
"learning_rate": 0.0001818302775908169,
"loss": 0.4551,
"step": 30
},
{
"epoch": 1.24,
"grad_norm": 0.28502359986305237,
"learning_rate": 0.00018027116379309638,
"loss": 0.4322,
"step": 31
},
{
"epoch": 1.28,
"grad_norm": 0.28252413868904114,
"learning_rate": 0.00017865515558026428,
"loss": 0.4234,
"step": 32
},
{
"epoch": 1.32,
"grad_norm": 0.30743300914764404,
"learning_rate": 0.00017698339834299061,
"loss": 0.444,
"step": 33
},
{
"epoch": 1.3599999999999999,
"grad_norm": 0.2907680869102478,
"learning_rate": 0.00017525707698561385,
"loss": 0.4154,
"step": 34
},
{
"epoch": 1.4,
"grad_norm": 0.30981379747390747,
"learning_rate": 0.00017347741508630672,
"loss": 0.4154,
"step": 35
},
{
"epoch": 1.44,
"grad_norm": 0.2963266968727112,
"learning_rate": 0.00017164567402983152,
"loss": 0.404,
"step": 36
},
{
"epoch": 1.48,
"grad_norm": 0.3006102740764618,
"learning_rate": 0.0001697631521134985,
"loss": 0.4141,
"step": 37
},
{
"epoch": 1.52,
"grad_norm": 0.3143654763698578,
"learning_rate": 0.00016783118362696163,
"loss": 0.3708,
"step": 38
},
{
"epoch": 1.56,
"grad_norm": 0.3136065602302551,
"learning_rate": 0.00016585113790650388,
"loss": 0.4031,
"step": 39
},
{
"epoch": 1.6,
"grad_norm": 0.31369641423225403,
"learning_rate": 0.00016382441836448202,
"loss": 0.3937,
"step": 40
},
{
"epoch": 1.6400000000000001,
"grad_norm": 0.3261353671550751,
"learning_rate": 0.0001617524614946192,
"loss": 0.3931,
"step": 41
},
{
"epoch": 1.6800000000000002,
"grad_norm": 0.3178551197052002,
"learning_rate": 0.00015963673585385016,
"loss": 0.3829,
"step": 42
},
{
"epoch": 1.72,
"grad_norm": 0.3346520960330963,
"learning_rate": 0.0001574787410214407,
"loss": 0.3792,
"step": 43
},
{
"epoch": 1.76,
"grad_norm": 0.3492076098918915,
"learning_rate": 0.00015528000653611935,
"loss": 0.3777,
"step": 44
},
{
"epoch": 1.8,
"grad_norm": 0.3226877748966217,
"learning_rate": 0.00015304209081197425,
"loss": 0.379,
"step": 45
},
{
"epoch": 1.8399999999999999,
"grad_norm": 0.31526848673820496,
"learning_rate": 0.000150766580033884,
"loss": 0.3848,
"step": 46
},
{
"epoch": 1.88,
"grad_norm": 0.3140419125556946,
"learning_rate": 0.00014845508703326504,
"loss": 0.3542,
"step": 47
},
{
"epoch": 1.92,
"grad_norm": 0.32493019104003906,
"learning_rate": 0.0001461092501449326,
"loss": 0.3842,
"step": 48
},
{
"epoch": 1.96,
"grad_norm": 0.33945709466934204,
"learning_rate": 0.00014373073204588556,
"loss": 0.3677,
"step": 49
},
{
"epoch": 2.0,
"grad_norm": 0.3231065571308136,
"learning_rate": 0.00014132121857683783,
"loss": 0.3752,
"step": 50
},
{
"epoch": 2.04,
"grad_norm": 0.2973474860191345,
"learning_rate": 0.00013888241754733208,
"loss": 0.2789,
"step": 51
},
{
"epoch": 2.08,
"grad_norm": 0.30944961309432983,
"learning_rate": 0.00013641605752528224,
"loss": 0.279,
"step": 52
},
{
"epoch": 2.12,
"grad_norm": 0.3238925337791443,
"learning_rate": 0.00013392388661180303,
"loss": 0.2919,
"step": 53
},
{
"epoch": 2.16,
"grad_norm": 0.30867430567741394,
"learning_rate": 0.0001314076712021949,
"loss": 0.2998,
"step": 54
},
{
"epoch": 2.2,
"grad_norm": 0.29136329889297485,
"learning_rate": 0.0001288691947339621,
"loss": 0.279,
"step": 55
},
{
"epoch": 2.24,
"grad_norm": 0.2846209704875946,
"learning_rate": 0.00012631025642275212,
"loss": 0.2826,
"step": 56
},
{
"epoch": 2.2800000000000002,
"grad_norm": 0.29411131143569946,
"learning_rate": 0.0001237326699871115,
"loss": 0.2693,
"step": 57
},
{
"epoch": 2.32,
"grad_norm": 0.2946978211402893,
"learning_rate": 0.00012113826236296244,
"loss": 0.2777,
"step": 58
},
{
"epoch": 2.36,
"grad_norm": 0.30307185649871826,
"learning_rate": 0.00011852887240871145,
"loss": 0.2673,
"step": 59
},
{
"epoch": 2.4,
"grad_norm": 0.31018176674842834,
"learning_rate": 0.00011590634960190721,
"loss": 0.2658,
"step": 60
},
{
"epoch": 2.44,
"grad_norm": 0.30534908175468445,
"learning_rate": 0.00011327255272837221,
"loss": 0.2669,
"step": 61
},
{
"epoch": 2.48,
"grad_norm": 0.3161245286464691,
"learning_rate": 0.00011062934856473655,
"loss": 0.2793,
"step": 62
},
{
"epoch": 2.52,
"grad_norm": 0.3256876766681671,
"learning_rate": 0.00010797861055530831,
"loss": 0.2904,
"step": 63
},
{
"epoch": 2.56,
"grad_norm": 0.32278570532798767,
"learning_rate": 0.00010532221748421787,
"loss": 0.2785,
"step": 64
},
{
"epoch": 2.6,
"grad_norm": 0.3227723240852356,
"learning_rate": 0.00010266205214377748,
"loss": 0.2655,
"step": 65
},
{
"epoch": 2.64,
"grad_norm": 0.336511492729187,
"learning_rate": 0.0001,
"loss": 0.264,
"step": 66
},
{
"epoch": 2.68,
"grad_norm": 0.33010682463645935,
"learning_rate": 9.733794785622253e-05,
"loss": 0.2705,
"step": 67
},
{
"epoch": 2.7199999999999998,
"grad_norm": 0.33406832814216614,
"learning_rate": 9.467778251578217e-05,
"loss": 0.2587,
"step": 68
},
{
"epoch": 2.76,
"grad_norm": 0.31980621814727783,
"learning_rate": 9.202138944469168e-05,
"loss": 0.2554,
"step": 69
},
{
"epoch": 2.8,
"grad_norm": 0.3083789348602295,
"learning_rate": 8.937065143526347e-05,
"loss": 0.2594,
"step": 70
},
{
"epoch": 2.84,
"grad_norm": 0.34898000955581665,
"learning_rate": 8.672744727162781e-05,
"loss": 0.2741,
"step": 71
},
{
"epoch": 2.88,
"grad_norm": 0.3146231174468994,
"learning_rate": 8.409365039809281e-05,
"loss": 0.2565,
"step": 72
},
{
"epoch": 2.92,
"grad_norm": 0.3152382969856262,
"learning_rate": 8.147112759128859e-05,
"loss": 0.2492,
"step": 73
},
{
"epoch": 2.96,
"grad_norm": 0.32025042176246643,
"learning_rate": 7.886173763703757e-05,
"loss": 0.2589,
"step": 74
},
{
"epoch": 3.0,
"grad_norm": 0.3296203911304474,
"learning_rate": 7.626733001288851e-05,
"loss": 0.2622,
"step": 75
},
{
"epoch": 3.04,
"grad_norm": 0.29241126775741577,
"learning_rate": 7.368974357724789e-05,
"loss": 0.2021,
"step": 76
},
{
"epoch": 3.08,
"grad_norm": 0.2925822138786316,
"learning_rate": 7.113080526603792e-05,
"loss": 0.215,
"step": 77
},
{
"epoch": 3.12,
"grad_norm": 0.29066669940948486,
"learning_rate": 6.859232879780515e-05,
"loss": 0.2078,
"step": 78
},
{
"epoch": 3.16,
"grad_norm": 0.3053090274333954,
"learning_rate": 6.607611338819697e-05,
"loss": 0.2112,
"step": 79
},
{
"epoch": 3.2,
"grad_norm": 0.27980366349220276,
"learning_rate": 6.358394247471778e-05,
"loss": 0.1845,
"step": 80
},
{
"epoch": 3.24,
"grad_norm": 0.2979828715324402,
"learning_rate": 6.111758245266794e-05,
"loss": 0.2063,
"step": 81
},
{
"epoch": 3.2800000000000002,
"grad_norm": 0.3061892092227936,
"learning_rate": 5.867878142316221e-05,
"loss": 0.2004,
"step": 82
},
{
"epoch": 3.32,
"grad_norm": 0.2933078408241272,
"learning_rate": 5.626926795411447e-05,
"loss": 0.2112,
"step": 83
},
{
"epoch": 3.36,
"grad_norm": 0.27492034435272217,
"learning_rate": 5.38907498550674e-05,
"loss": 0.1915,
"step": 84
},
{
"epoch": 3.4,
"grad_norm": 0.28737884759902954,
"learning_rate": 5.1544912966734994e-05,
"loss": 0.2102,
"step": 85
},
{
"epoch": 3.44,
"grad_norm": 0.3012617528438568,
"learning_rate": 4.9233419966116036e-05,
"loss": 0.2003,
"step": 86
},
{
"epoch": 3.48,
"grad_norm": 0.2871868908405304,
"learning_rate": 4.695790918802576e-05,
"loss": 0.1934,
"step": 87
},
{
"epoch": 3.52,
"grad_norm": 0.29408469796180725,
"learning_rate": 4.47199934638807e-05,
"loss": 0.2067,
"step": 88
},
{
"epoch": 3.56,
"grad_norm": 0.27776145935058594,
"learning_rate": 4.252125897855932e-05,
"loss": 0.194,
"step": 89
},
{
"epoch": 3.6,
"grad_norm": 0.2886578440666199,
"learning_rate": 4.036326414614985e-05,
"loss": 0.1914,
"step": 90
},
{
"epoch": 3.64,
"grad_norm": 0.3070749342441559,
"learning_rate": 3.824753850538082e-05,
"loss": 0.2119,
"step": 91
},
{
"epoch": 3.68,
"grad_norm": 0.30975136160850525,
"learning_rate": 3.617558163551802e-05,
"loss": 0.1958,
"step": 92
},
{
"epoch": 3.7199999999999998,
"grad_norm": 0.31414180994033813,
"learning_rate": 3.414886209349615e-05,
"loss": 0.1937,
"step": 93
},
{
"epoch": 3.76,
"grad_norm": 0.31353330612182617,
"learning_rate": 3.216881637303839e-05,
"loss": 0.1946,
"step": 94
},
{
"epoch": 3.8,
"grad_norm": 0.29970917105674744,
"learning_rate": 3.0236847886501542e-05,
"loss": 0.2061,
"step": 95
},
{
"epoch": 3.84,
"grad_norm": 0.30475103855133057,
"learning_rate": 2.8354325970168484e-05,
"loss": 0.1884,
"step": 96
},
{
"epoch": 3.88,
"grad_norm": 0.2989894449710846,
"learning_rate": 2.6522584913693294e-05,
"loss": 0.1924,
"step": 97
},
{
"epoch": 3.92,
"grad_norm": 0.30253222584724426,
"learning_rate": 2.4742923014386156e-05,
"loss": 0.1928,
"step": 98
},
{
"epoch": 3.96,
"grad_norm": 0.30798980593681335,
"learning_rate": 2.301660165700936e-05,
"loss": 0.2003,
"step": 99
},
{
"epoch": 4.0,
"grad_norm": 0.29617148637771606,
"learning_rate": 2.1344844419735755e-05,
"loss": 0.1867,
"step": 100
},
{
"epoch": 4.04,
"grad_norm": 0.2711721360683441,
"learning_rate": 1.9728836206903656e-05,
"loss": 0.1657,
"step": 101
},
{
"epoch": 4.08,
"grad_norm": 0.2831830382347107,
"learning_rate": 1.8169722409183097e-05,
"loss": 0.1692,
"step": 102
},
{
"epoch": 4.12,
"grad_norm": 0.272053599357605,
"learning_rate": 1.6668608091748495e-05,
"loss": 0.1679,
"step": 103
},
{
"epoch": 4.16,
"grad_norm": 0.27920499444007874,
"learning_rate": 1.522655721103291e-05,
"loss": 0.1843,
"step": 104
},
{
"epoch": 4.2,
"grad_norm": 0.2759131193161011,
"learning_rate": 1.3844591860619383e-05,
"loss": 0.1626,
"step": 105
},
{
"epoch": 4.24,
"grad_norm": 0.2894749343395233,
"learning_rate": 1.2523691546803873e-05,
"loss": 0.1762,
"step": 106
},
{
"epoch": 4.28,
"grad_norm": 0.2730141580104828,
"learning_rate": 1.1264792494342857e-05,
"loss": 0.1703,
"step": 107
},
{
"epoch": 4.32,
"grad_norm": 0.26933005452156067,
"learning_rate": 1.0068786982878087e-05,
"loss": 0.163,
"step": 108
},
{
"epoch": 4.36,
"grad_norm": 0.2708563506603241,
"learning_rate": 8.936522714508678e-06,
"loss": 0.1679,
"step": 109
},
{
"epoch": 4.4,
"grad_norm": 0.26494595408439636,
"learning_rate": 7.868802212958703e-06,
"loss": 0.1611,
"step": 110
},
{
"epoch": 4.44,
"grad_norm": 0.2679324746131897,
"learning_rate": 6.866382254766157e-06,
"loss": 0.1646,
"step": 111
},
{
"epoch": 4.48,
"grad_norm": 0.26407480239868164,
"learning_rate": 5.929973332896677e-06,
"loss": 0.1616,
"step": 112
},
{
"epoch": 4.52,
"grad_norm": 0.28398773074150085,
"learning_rate": 5.060239153161872e-06,
"loss": 0.1795,
"step": 113
},
{
"epoch": 4.5600000000000005,
"grad_norm": 0.26915472745895386,
"learning_rate": 4.257796163799455e-06,
"loss": 0.1616,
"step": 114
},
{
"epoch": 4.6,
"grad_norm": 0.2760821282863617,
"learning_rate": 3.5232131185484076e-06,
"loss": 0.1535,
"step": 115
},
{
"epoch": 4.64,
"grad_norm": 0.2624204754829407,
"learning_rate": 2.857010673529015e-06,
"loss": 0.1697,
"step": 116
},
{
"epoch": 4.68,
"grad_norm": 0.2621574103832245,
"learning_rate": 2.259661018213333e-06,
"loss": 0.1627,
"step": 117
},
{
"epoch": 4.72,
"grad_norm": 0.2760251760482788,
"learning_rate": 1.7315875407479032e-06,
"loss": 0.1747,
"step": 118
},
{
"epoch": 4.76,
"grad_norm": 0.26422515511512756,
"learning_rate": 1.2731645278655445e-06,
"loss": 0.1653,
"step": 119
},
{
"epoch": 4.8,
"grad_norm": 0.2870921790599823,
"learning_rate": 8.847168995992916e-07,
"loss": 0.1699,
"step": 120
},
{
"epoch": 4.84,
"grad_norm": 0.2736106812953949,
"learning_rate": 5.665199789862907e-07,
"loss": 0.1688,
"step": 121
},
{
"epoch": 4.88,
"grad_norm": 0.27442148327827454,
"learning_rate": 3.1879929692498757e-07,
"loss": 0.1722,
"step": 122
},
{
"epoch": 4.92,
"grad_norm": 0.26734665036201477,
"learning_rate": 1.4173043232380557e-07,
"loss": 0.1634,
"step": 123
},
{
"epoch": 4.96,
"grad_norm": 0.263090580701828,
"learning_rate": 3.5438887654737355e-08,
"loss": 0.1631,
"step": 124
},
{
"epoch": 5.0,
"grad_norm": 0.2877998650074005,
"learning_rate": 0.0,
"loss": 0.1683,
"step": 125
}
],
"logging_steps": 1,
"max_steps": 125,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.8525301616443392e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}