chai_s7_34b_2k_FPR / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.3301465239530292,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 1.3333333333333334e-06,
"loss": 1.1551,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 2.666666666666667e-06,
"loss": 1.154,
"step": 2
},
{
"epoch": 0.02,
"learning_rate": 4.000000000000001e-06,
"loss": 1.1059,
"step": 3
},
{
"epoch": 0.03,
"learning_rate": 5.333333333333334e-06,
"loss": 1.0639,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 6.666666666666667e-06,
"loss": 1.0078,
"step": 5
},
{
"epoch": 0.04,
"learning_rate": 8.000000000000001e-06,
"loss": 0.9614,
"step": 6
},
{
"epoch": 0.05,
"learning_rate": 9.333333333333334e-06,
"loss": 0.9721,
"step": 7
},
{
"epoch": 0.05,
"learning_rate": 1.0666666666666667e-05,
"loss": 0.9311,
"step": 8
},
{
"epoch": 0.06,
"learning_rate": 1.2e-05,
"loss": 0.9092,
"step": 9
},
{
"epoch": 0.07,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.8757,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 1.4666666666666666e-05,
"loss": 0.9356,
"step": 11
},
{
"epoch": 0.08,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.8963,
"step": 12
},
{
"epoch": 0.09,
"learning_rate": 1.7333333333333336e-05,
"loss": 0.9095,
"step": 13
},
{
"epoch": 0.09,
"learning_rate": 1.866666666666667e-05,
"loss": 0.9306,
"step": 14
},
{
"epoch": 0.1,
"learning_rate": 2e-05,
"loss": 0.9089,
"step": 15
},
{
"epoch": 0.11,
"learning_rate": 1.9999392458943432e-05,
"loss": 0.9064,
"step": 16
},
{
"epoch": 0.11,
"learning_rate": 1.9997569909594948e-05,
"loss": 0.8986,
"step": 17
},
{
"epoch": 0.12,
"learning_rate": 1.999453257340926e-05,
"loss": 0.923,
"step": 18
},
{
"epoch": 0.13,
"learning_rate": 1.9990280819447662e-05,
"loss": 0.9112,
"step": 19
},
{
"epoch": 0.13,
"learning_rate": 1.9984815164333163e-05,
"loss": 0.8882,
"step": 20
},
{
"epoch": 0.14,
"learning_rate": 1.9978136272187745e-05,
"loss": 0.9149,
"step": 21
},
{
"epoch": 0.15,
"learning_rate": 1.9970244954551648e-05,
"loss": 0.9003,
"step": 22
},
{
"epoch": 0.15,
"learning_rate": 1.9961142170284762e-05,
"loss": 0.8286,
"step": 23
},
{
"epoch": 0.16,
"learning_rate": 1.9950829025450116e-05,
"loss": 0.8972,
"step": 24
},
{
"epoch": 0.17,
"learning_rate": 1.9939306773179498e-05,
"loss": 0.887,
"step": 25
},
{
"epoch": 0.17,
"learning_rate": 1.9926576813521167e-05,
"loss": 0.8968,
"step": 26
},
{
"epoch": 0.18,
"learning_rate": 1.9912640693269754e-05,
"loss": 0.8983,
"step": 27
},
{
"epoch": 0.19,
"learning_rate": 1.98975001057783e-05,
"loss": 0.9228,
"step": 28
},
{
"epoch": 0.19,
"learning_rate": 1.9881156890752517e-05,
"loss": 0.905,
"step": 29
},
{
"epoch": 0.2,
"learning_rate": 1.9863613034027224e-05,
"loss": 0.879,
"step": 30
},
{
"epoch": 0.21,
"learning_rate": 1.9844870667325073e-05,
"loss": 0.8406,
"step": 31
},
{
"epoch": 0.21,
"learning_rate": 1.9824932067997516e-05,
"loss": 0.8851,
"step": 32
},
{
"epoch": 0.22,
"learning_rate": 1.9803799658748096e-05,
"loss": 0.884,
"step": 33
},
{
"epoch": 0.23,
"learning_rate": 1.9781476007338058e-05,
"loss": 0.8482,
"step": 34
},
{
"epoch": 0.23,
"learning_rate": 1.9757963826274357e-05,
"loss": 0.8561,
"step": 35
},
{
"epoch": 0.24,
"learning_rate": 1.973326597248006e-05,
"loss": 0.923,
"step": 36
},
{
"epoch": 0.25,
"learning_rate": 1.97073854469472e-05,
"loss": 0.848,
"step": 37
},
{
"epoch": 0.25,
"learning_rate": 1.968032539437215e-05,
"loss": 0.8416,
"step": 38
},
{
"epoch": 0.26,
"learning_rate": 1.9652089102773487e-05,
"loss": 0.8664,
"step": 39
},
{
"epoch": 0.27,
"learning_rate": 1.9622680003092503e-05,
"loss": 0.8776,
"step": 40
},
{
"epoch": 0.27,
"learning_rate": 1.95921016687763e-05,
"loss": 0.902,
"step": 41
},
{
"epoch": 0.28,
"learning_rate": 1.9560357815343577e-05,
"loss": 0.895,
"step": 42
},
{
"epoch": 0.29,
"learning_rate": 1.9527452299933192e-05,
"loss": 0.8891,
"step": 43
},
{
"epoch": 0.29,
"learning_rate": 1.9493389120835462e-05,
"loss": 0.8614,
"step": 44
},
{
"epoch": 0.3,
"learning_rate": 1.9458172417006347e-05,
"loss": 0.8723,
"step": 45
},
{
"epoch": 0.31,
"learning_rate": 1.9421806467564546e-05,
"loss": 0.8956,
"step": 46
},
{
"epoch": 0.31,
"learning_rate": 1.9384295691271523e-05,
"loss": 0.9021,
"step": 47
},
{
"epoch": 0.32,
"learning_rate": 1.934564464599461e-05,
"loss": 0.9069,
"step": 48
},
{
"epoch": 0.33,
"learning_rate": 1.9305858028153186e-05,
"loss": 0.8903,
"step": 49
},
{
"epoch": 0.33,
"learning_rate": 1.9264940672148018e-05,
"loss": 0.8661,
"step": 50
},
{
"epoch": 0.33,
"eval_loss": 1.056524395942688,
"eval_runtime": 106.5377,
"eval_samples_per_second": 7.303,
"eval_steps_per_second": 0.235,
"step": 50
},
{
"epoch": 0.34,
"learning_rate": 1.922289754977385e-05,
"loss": 0.8758,
"step": 51
},
{
"epoch": 0.35,
"learning_rate": 1.9179733769615273e-05,
"loss": 0.87,
"step": 52
},
{
"epoch": 0.35,
"learning_rate": 1.913545457642601e-05,
"loss": 0.8788,
"step": 53
},
{
"epoch": 0.36,
"learning_rate": 1.909006535049163e-05,
"loss": 0.8621,
"step": 54
},
{
"epoch": 0.37,
"learning_rate": 1.9043571606975776e-05,
"loss": 0.8481,
"step": 55
},
{
"epoch": 0.37,
"learning_rate": 1.899597899525007e-05,
"loss": 0.8284,
"step": 56
},
{
"epoch": 0.38,
"learning_rate": 1.8947293298207637e-05,
"loss": 0.8297,
"step": 57
},
{
"epoch": 0.39,
"learning_rate": 1.8897520431560435e-05,
"loss": 0.8697,
"step": 58
},
{
"epoch": 0.39,
"learning_rate": 1.884666644312046e-05,
"loss": 0.8544,
"step": 59
},
{
"epoch": 0.4,
"learning_rate": 1.879473751206489e-05,
"loss": 0.9064,
"step": 60
},
{
"epoch": 0.41,
"learning_rate": 1.8741739948185256e-05,
"loss": 0.8845,
"step": 61
},
{
"epoch": 0.41,
"learning_rate": 1.8687680191120746e-05,
"loss": 0.8724,
"step": 62
},
{
"epoch": 0.42,
"learning_rate": 1.863256480957574e-05,
"loss": 0.8918,
"step": 63
},
{
"epoch": 0.43,
"learning_rate": 1.8576400500521673e-05,
"loss": 0.893,
"step": 64
},
{
"epoch": 0.43,
"learning_rate": 1.851919408838327e-05,
"loss": 0.8763,
"step": 65
},
{
"epoch": 0.44,
"learning_rate": 1.8460952524209355e-05,
"loss": 0.8619,
"step": 66
},
{
"epoch": 0.45,
"learning_rate": 1.8401682884828212e-05,
"loss": 0.8779,
"step": 67
},
{
"epoch": 0.45,
"learning_rate": 1.83413923719877e-05,
"loss": 0.8755,
"step": 68
},
{
"epoch": 0.46,
"learning_rate": 1.8280088311480203e-05,
"loss": 0.8846,
"step": 69
},
{
"epoch": 0.47,
"learning_rate": 1.821777815225245e-05,
"loss": 0.8681,
"step": 70
},
{
"epoch": 0.47,
"learning_rate": 1.8154469465500447e-05,
"loss": 0.865,
"step": 71
},
{
"epoch": 0.48,
"learning_rate": 1.8090169943749477e-05,
"loss": 0.832,
"step": 72
},
{
"epoch": 0.49,
"learning_rate": 1.802488739991941e-05,
"loss": 0.8529,
"step": 73
},
{
"epoch": 0.49,
"learning_rate": 1.7958629766375387e-05,
"loss": 0.8143,
"step": 74
},
{
"epoch": 0.5,
"learning_rate": 1.789140509396394e-05,
"loss": 0.8611,
"step": 75
},
{
"epoch": 0.51,
"learning_rate": 1.7823221551034766e-05,
"loss": 0.8464,
"step": 76
},
{
"epoch": 0.51,
"learning_rate": 1.7754087422448217e-05,
"loss": 0.828,
"step": 77
},
{
"epoch": 0.52,
"learning_rate": 1.7684011108568593e-05,
"loss": 0.8649,
"step": 78
},
{
"epoch": 0.53,
"learning_rate": 1.7613001124243448e-05,
"loss": 0.8319,
"step": 79
},
{
"epoch": 0.53,
"learning_rate": 1.7541066097768965e-05,
"loss": 0.8834,
"step": 80
},
{
"epoch": 0.54,
"learning_rate": 1.7468214769841542e-05,
"loss": 0.8508,
"step": 81
},
{
"epoch": 0.55,
"learning_rate": 1.7394455992495722e-05,
"loss": 0.8629,
"step": 82
},
{
"epoch": 0.55,
"learning_rate": 1.7319798728028617e-05,
"loss": 0.8607,
"step": 83
},
{
"epoch": 0.56,
"learning_rate": 1.7244252047910893e-05,
"loss": 0.81,
"step": 84
},
{
"epoch": 0.57,
"learning_rate": 1.7167825131684516e-05,
"loss": 0.8616,
"step": 85
},
{
"epoch": 0.57,
"learning_rate": 1.7090527265847375e-05,
"loss": 0.8772,
"step": 86
},
{
"epoch": 0.58,
"learning_rate": 1.7012367842724887e-05,
"loss": 0.8661,
"step": 87
},
{
"epoch": 0.59,
"learning_rate": 1.6933356359328756e-05,
"loss": 0.8721,
"step": 88
},
{
"epoch": 0.59,
"learning_rate": 1.6853502416203e-05,
"loss": 0.8814,
"step": 89
},
{
"epoch": 0.6,
"learning_rate": 1.6772815716257414e-05,
"loss": 0.8447,
"step": 90
},
{
"epoch": 0.61,
"learning_rate": 1.6691306063588583e-05,
"loss": 0.8353,
"step": 91
},
{
"epoch": 0.61,
"learning_rate": 1.6608983362288612e-05,
"loss": 0.8291,
"step": 92
},
{
"epoch": 0.62,
"learning_rate": 1.6525857615241686e-05,
"loss": 0.8955,
"step": 93
},
{
"epoch": 0.63,
"learning_rate": 1.6441938922908644e-05,
"loss": 0.8361,
"step": 94
},
{
"epoch": 0.63,
"learning_rate": 1.6357237482099682e-05,
"loss": 0.8597,
"step": 95
},
{
"epoch": 0.64,
"learning_rate": 1.6271763584735373e-05,
"loss": 0.8375,
"step": 96
},
{
"epoch": 0.65,
"learning_rate": 1.6185527616596096e-05,
"loss": 0.8907,
"step": 97
},
{
"epoch": 0.65,
"learning_rate": 1.609854005606009e-05,
"loss": 0.8775,
"step": 98
},
{
"epoch": 0.66,
"learning_rate": 1.6010811472830253e-05,
"loss": 0.8629,
"step": 99
},
{
"epoch": 0.67,
"learning_rate": 1.5922352526649803e-05,
"loss": 0.8306,
"step": 100
},
{
"epoch": 0.67,
"eval_loss": 1.0295250415802002,
"eval_runtime": 108.1298,
"eval_samples_per_second": 7.195,
"eval_steps_per_second": 0.231,
"step": 100
},
{
"epoch": 0.67,
"learning_rate": 1.583317396600707e-05,
"loss": 0.856,
"step": 101
},
{
"epoch": 0.68,
"learning_rate": 1.5743286626829437e-05,
"loss": 0.8463,
"step": 102
},
{
"epoch": 0.69,
"learning_rate": 1.565270143116672e-05,
"loss": 0.8399,
"step": 103
},
{
"epoch": 0.69,
"learning_rate": 1.5561429385864005e-05,
"loss": 0.9041,
"step": 104
},
{
"epoch": 0.7,
"learning_rate": 1.5469481581224274e-05,
"loss": 0.8555,
"step": 105
},
{
"epoch": 0.7,
"learning_rate": 1.5376869189660784e-05,
"loss": 0.8195,
"step": 106
},
{
"epoch": 0.71,
"learning_rate": 1.528360346433959e-05,
"loss": 0.8211,
"step": 107
},
{
"epoch": 0.72,
"learning_rate": 1.5189695737812153e-05,
"loss": 0.8115,
"step": 108
},
{
"epoch": 0.72,
"learning_rate": 1.5095157420638349e-05,
"loss": 0.8764,
"step": 109
},
{
"epoch": 0.73,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.8073,
"step": 110
},
{
"epoch": 0.74,
"learning_rate": 1.4904235038305084e-05,
"loss": 0.8068,
"step": 111
},
{
"epoch": 0.74,
"learning_rate": 1.4807874171782795e-05,
"loss": 0.8571,
"step": 112
},
{
"epoch": 0.75,
"learning_rate": 1.4710929109069674e-05,
"loss": 0.8372,
"step": 113
},
{
"epoch": 0.76,
"learning_rate": 1.461341162978688e-05,
"loss": 0.8119,
"step": 114
},
{
"epoch": 0.76,
"learning_rate": 1.4515333583108896e-05,
"loss": 0.8474,
"step": 115
},
{
"epoch": 0.77,
"learning_rate": 1.4416706886323741e-05,
"loss": 0.8273,
"step": 116
},
{
"epoch": 0.78,
"learning_rate": 1.4317543523384928e-05,
"loss": 0.8668,
"step": 117
},
{
"epoch": 0.78,
"learning_rate": 1.4217855543455323e-05,
"loss": 0.8522,
"step": 118
},
{
"epoch": 0.79,
"learning_rate": 1.4117655059443052e-05,
"loss": 0.8469,
"step": 119
},
{
"epoch": 0.8,
"learning_rate": 1.4016954246529697e-05,
"loss": 0.8221,
"step": 120
},
{
"epoch": 0.8,
"learning_rate": 1.3915765340690916e-05,
"loss": 0.851,
"step": 121
},
{
"epoch": 0.81,
"learning_rate": 1.3814100637209663e-05,
"loss": 0.8476,
"step": 122
},
{
"epoch": 0.82,
"learning_rate": 1.3711972489182208e-05,
"loss": 0.8443,
"step": 123
},
{
"epoch": 0.82,
"learning_rate": 1.3609393306017149e-05,
"loss": 0.848,
"step": 124
},
{
"epoch": 0.83,
"learning_rate": 1.3506375551927546e-05,
"loss": 0.815,
"step": 125
},
{
"epoch": 0.84,
"learning_rate": 1.3402931744416432e-05,
"loss": 0.8309,
"step": 126
},
{
"epoch": 0.84,
"learning_rate": 1.3299074452755829e-05,
"loss": 0.7954,
"step": 127
},
{
"epoch": 0.85,
"learning_rate": 1.3194816296459483e-05,
"loss": 0.8263,
"step": 128
},
{
"epoch": 0.86,
"learning_rate": 1.3090169943749475e-05,
"loss": 0.8303,
"step": 129
},
{
"epoch": 0.86,
"learning_rate": 1.2985148110016947e-05,
"loss": 0.8463,
"step": 130
},
{
"epoch": 0.87,
"learning_rate": 1.2879763556277062e-05,
"loss": 0.8681,
"step": 131
},
{
"epoch": 0.88,
"learning_rate": 1.2774029087618448e-05,
"loss": 0.8244,
"step": 132
},
{
"epoch": 0.88,
"learning_rate": 1.2667957551647263e-05,
"loss": 0.8291,
"step": 133
},
{
"epoch": 0.89,
"learning_rate": 1.2561561836926115e-05,
"loss": 0.8724,
"step": 134
},
{
"epoch": 0.9,
"learning_rate": 1.2454854871407993e-05,
"loss": 0.8229,
"step": 135
},
{
"epoch": 0.9,
"learning_rate": 1.234784962086541e-05,
"loss": 0.8299,
"step": 136
},
{
"epoch": 0.91,
"learning_rate": 1.224055908731496e-05,
"loss": 0.8297,
"step": 137
},
{
"epoch": 0.92,
"learning_rate": 1.213299630743747e-05,
"loss": 0.8286,
"step": 138
},
{
"epoch": 0.92,
"learning_rate": 1.2025174350993923e-05,
"loss": 0.8542,
"step": 139
},
{
"epoch": 0.93,
"learning_rate": 1.1917106319237386e-05,
"loss": 0.8157,
"step": 140
},
{
"epoch": 0.94,
"learning_rate": 1.1808805343321102e-05,
"loss": 0.8591,
"step": 141
},
{
"epoch": 0.94,
"learning_rate": 1.1700284582702933e-05,
"loss": 0.8192,
"step": 142
},
{
"epoch": 0.95,
"learning_rate": 1.1591557223546394e-05,
"loss": 0.8493,
"step": 143
},
{
"epoch": 0.96,
"learning_rate": 1.148263647711842e-05,
"loss": 0.858,
"step": 144
},
{
"epoch": 0.96,
"learning_rate": 1.1373535578184083e-05,
"loss": 0.7751,
"step": 145
},
{
"epoch": 0.97,
"learning_rate": 1.1264267783398463e-05,
"loss": 0.7998,
"step": 146
},
{
"epoch": 0.98,
"learning_rate": 1.1154846369695864e-05,
"loss": 0.8321,
"step": 147
},
{
"epoch": 0.98,
"learning_rate": 1.1045284632676535e-05,
"loss": 0.8339,
"step": 148
},
{
"epoch": 0.99,
"learning_rate": 1.093559588499118e-05,
"loss": 0.8018,
"step": 149
},
{
"epoch": 1.0,
"learning_rate": 1.0825793454723325e-05,
"loss": 0.8481,
"step": 150
},
{
"epoch": 1.0,
"eval_loss": 1.0097606182098389,
"eval_runtime": 105.9007,
"eval_samples_per_second": 7.347,
"eval_steps_per_second": 0.236,
"step": 150
},
{
"epoch": 1.0,
"learning_rate": 1.0715890683769872e-05,
"loss": 0.6491,
"step": 151
},
{
"epoch": 1.01,
"learning_rate": 1.060590092621994e-05,
"loss": 0.566,
"step": 152
},
{
"epoch": 1.02,
"learning_rate": 1.0495837546732224e-05,
"loss": 0.5246,
"step": 153
},
{
"epoch": 1.02,
"learning_rate": 1.0385713918911104e-05,
"loss": 0.553,
"step": 154
},
{
"epoch": 1.03,
"learning_rate": 1.0275543423681622e-05,
"loss": 0.5431,
"step": 155
},
{
"epoch": 1.04,
"learning_rate": 1.0165339447663586e-05,
"loss": 0.5586,
"step": 156
},
{
"epoch": 1.04,
"learning_rate": 1.0055115381545006e-05,
"loss": 0.5666,
"step": 157
},
{
"epoch": 1.05,
"learning_rate": 9.944884618454996e-06,
"loss": 0.5396,
"step": 158
},
{
"epoch": 1.06,
"learning_rate": 9.834660552336415e-06,
"loss": 0.5399,
"step": 159
},
{
"epoch": 1.06,
"learning_rate": 9.724456576318383e-06,
"loss": 0.5398,
"step": 160
},
{
"epoch": 1.07,
"learning_rate": 9.614286081088895e-06,
"loss": 0.5432,
"step": 161
},
{
"epoch": 1.08,
"learning_rate": 9.504162453267776e-06,
"loss": 0.5178,
"step": 162
},
{
"epoch": 1.08,
"learning_rate": 9.394099073780066e-06,
"loss": 0.5477,
"step": 163
},
{
"epoch": 1.09,
"learning_rate": 9.284109316230133e-06,
"loss": 0.5143,
"step": 164
},
{
"epoch": 1.1,
"learning_rate": 9.174206545276678e-06,
"loss": 0.5184,
"step": 165
},
{
"epoch": 1.1,
"learning_rate": 9.064404115008824e-06,
"loss": 0.551,
"step": 166
},
{
"epoch": 1.11,
"learning_rate": 8.954715367323468e-06,
"loss": 0.5344,
"step": 167
},
{
"epoch": 1.12,
"learning_rate": 8.84515363030414e-06,
"loss": 0.5309,
"step": 168
},
{
"epoch": 1.12,
"learning_rate": 8.735732216601538e-06,
"loss": 0.5222,
"step": 169
},
{
"epoch": 1.13,
"learning_rate": 8.626464421815919e-06,
"loss": 0.5409,
"step": 170
},
{
"epoch": 1.14,
"learning_rate": 8.51736352288158e-06,
"loss": 0.505,
"step": 171
},
{
"epoch": 1.14,
"learning_rate": 8.408442776453606e-06,
"loss": 0.5329,
"step": 172
},
{
"epoch": 1.15,
"learning_rate": 8.299715417297072e-06,
"loss": 0.5214,
"step": 173
},
{
"epoch": 1.16,
"learning_rate": 8.191194656678905e-06,
"loss": 0.5575,
"step": 174
},
{
"epoch": 1.16,
"learning_rate": 8.082893680762619e-06,
"loss": 0.4793,
"step": 175
},
{
"epoch": 1.17,
"learning_rate": 7.974825649006082e-06,
"loss": 0.5347,
"step": 176
},
{
"epoch": 1.18,
"learning_rate": 7.867003692562533e-06,
"loss": 0.5023,
"step": 177
},
{
"epoch": 1.18,
"learning_rate": 7.759440912685043e-06,
"loss": 0.5287,
"step": 178
},
{
"epoch": 1.19,
"learning_rate": 7.652150379134593e-06,
"loss": 0.5421,
"step": 179
},
{
"epoch": 1.2,
"learning_rate": 7.545145128592009e-06,
"loss": 0.5401,
"step": 180
},
{
"epoch": 1.2,
"learning_rate": 7.438438163073884e-06,
"loss": 0.5294,
"step": 181
},
{
"epoch": 1.21,
"learning_rate": 7.3320424483527385e-06,
"loss": 0.5514,
"step": 182
},
{
"epoch": 1.22,
"learning_rate": 7.225970912381557e-06,
"loss": 0.5564,
"step": 183
},
{
"epoch": 1.22,
"learning_rate": 7.120236443722941e-06,
"loss": 0.5374,
"step": 184
},
{
"epoch": 1.23,
"learning_rate": 7.014851889983058e-06,
"loss": 0.5404,
"step": 185
},
{
"epoch": 1.24,
"learning_rate": 6.909830056250527e-06,
"loss": 0.5479,
"step": 186
},
{
"epoch": 1.24,
"learning_rate": 6.80518370354052e-06,
"loss": 0.4935,
"step": 187
},
{
"epoch": 1.25,
"learning_rate": 6.700925547244173e-06,
"loss": 0.4902,
"step": 188
},
{
"epoch": 1.26,
"learning_rate": 6.59706825558357e-06,
"loss": 0.5132,
"step": 189
},
{
"epoch": 1.26,
"learning_rate": 6.4936244480724575e-06,
"loss": 0.5228,
"step": 190
},
{
"epoch": 1.27,
"learning_rate": 6.3906066939828546e-06,
"loss": 0.5442,
"step": 191
},
{
"epoch": 1.28,
"learning_rate": 6.2880275108177915e-06,
"loss": 0.516,
"step": 192
},
{
"epoch": 1.28,
"learning_rate": 6.18589936279034e-06,
"loss": 0.5257,
"step": 193
},
{
"epoch": 1.29,
"learning_rate": 6.084234659309088e-06,
"loss": 0.5389,
"step": 194
},
{
"epoch": 1.3,
"learning_rate": 5.983045753470308e-06,
"loss": 0.5254,
"step": 195
},
{
"epoch": 1.3,
"learning_rate": 5.8823449405569525e-06,
"loss": 0.5112,
"step": 196
},
{
"epoch": 1.31,
"learning_rate": 5.782144456544681e-06,
"loss": 0.5496,
"step": 197
},
{
"epoch": 1.32,
"learning_rate": 5.6824564766150724e-06,
"loss": 0.5285,
"step": 198
},
{
"epoch": 1.32,
"learning_rate": 5.58329311367626e-06,
"loss": 0.5261,
"step": 199
},
{
"epoch": 1.33,
"learning_rate": 5.484666416891109e-06,
"loss": 0.5185,
"step": 200
},
{
"epoch": 1.33,
"eval_loss": 1.0541412830352783,
"eval_runtime": 105.5736,
"eval_samples_per_second": 7.369,
"eval_steps_per_second": 0.237,
"step": 200
}
],
"logging_steps": 1.0,
"max_steps": 300,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"total_flos": 304188748595200.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
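
Below is a minimal sketch (the local file name "trainer_state.json" is an assumption) of how the log_history above can be loaded and summarized with the Python standard library: training records are the entries carrying a "loss" key, while the periodic evaluations (every 50 steps, per "eval_steps") carry "eval_loss" instead.

import json

# Minimal sketch: load the trainer state shown above (local file name is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss records carry "loss"; evaluation records carry "eval_loss".
train_log = [entry for entry in state["log_history"] if "loss" in entry]
eval_log = [entry for entry in state["log_history"] if "eval_loss" in entry]

print(f"logged training steps: {len(train_log)} of global_step {state['global_step']}")
for entry in eval_log:
    print(f"step {entry['step']:>3}: eval_loss = {entry['eval_loss']:.4f}")

For reference, the learning_rate values above are consistent with a 15-step linear warmup to 2e-05 followed by cosine decay toward max_steps = 300, and the lowest eval_loss in this window is 1.0098 at step 150.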