{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.397350993377484,
"global_step": 166,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 8.333333333333333e-07,
"loss": 1.3883,
"step": 1
},
{
"epoch": 0.05,
"learning_rate": 1.6666666666666667e-06,
"loss": 1.4311,
"step": 2
},
{
"epoch": 0.08,
"learning_rate": 2.5e-06,
"loss": 1.2563,
"step": 3
},
{
"epoch": 0.11,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.2668,
"step": 4
},
{
"epoch": 0.13,
"learning_rate": 4.166666666666667e-06,
"loss": 1.2419,
"step": 5
},
{
"epoch": 0.16,
"learning_rate": 5e-06,
"loss": 1.3009,
"step": 6
},
{
"epoch": 0.19,
"learning_rate": 5.833333333333334e-06,
"loss": 1.2596,
"step": 7
},
{
"epoch": 0.21,
"learning_rate": 6.666666666666667e-06,
"loss": 1.362,
"step": 8
},
{
"epoch": 0.24,
"learning_rate": 7.500000000000001e-06,
"loss": 1.3293,
"step": 9
},
{
"epoch": 0.26,
"learning_rate": 8.333333333333334e-06,
"loss": 1.4663,
"step": 10
},
{
"epoch": 0.29,
"learning_rate": 9.166666666666666e-06,
"loss": 1.4526,
"step": 11
},
{
"epoch": 0.32,
"learning_rate": 1e-05,
"loss": 1.4587,
"step": 12
},
{
"epoch": 0.34,
"learning_rate": 9.999807482189519e-06,
"loss": 1.2978,
"step": 13
},
{
"epoch": 0.37,
"learning_rate": 9.999229943583318e-06,
"loss": 1.2931,
"step": 14
},
{
"epoch": 0.4,
"learning_rate": 9.998267428655983e-06,
"loss": 1.4222,
"step": 15
},
{
"epoch": 0.42,
"learning_rate": 9.996920011528022e-06,
"loss": 1.3721,
"step": 16
},
{
"epoch": 0.45,
"learning_rate": 9.995187795960152e-06,
"loss": 1.2833,
"step": 17
},
{
"epoch": 0.48,
"learning_rate": 9.993070915345313e-06,
"loss": 1.2782,
"step": 18
},
{
"epoch": 0.5,
"learning_rate": 9.990569532698393e-06,
"loss": 1.3794,
"step": 19
},
{
"epoch": 0.53,
"learning_rate": 9.987683840643679e-06,
"loss": 1.3194,
"step": 20
},
{
"epoch": 0.56,
"learning_rate": 9.984414061400011e-06,
"loss": 1.3508,
"step": 21
},
{
"epoch": 0.58,
"learning_rate": 9.980760446763693e-06,
"loss": 1.4149,
"step": 22
},
{
"epoch": 0.61,
"learning_rate": 9.976723278089075e-06,
"loss": 1.2859,
"step": 23
},
{
"epoch": 0.64,
"learning_rate": 9.97230286626691e-06,
"loss": 1.2876,
"step": 24
},
{
"epoch": 0.66,
"learning_rate": 9.967499551700398e-06,
"loss": 1.4954,
"step": 25
},
{
"epoch": 0.69,
"learning_rate": 9.962313704278981e-06,
"loss": 1.4377,
"step": 26
},
{
"epoch": 0.72,
"learning_rate": 9.956745723349857e-06,
"loss": 1.4215,
"step": 27
},
{
"epoch": 0.74,
"learning_rate": 9.950796037687224e-06,
"loss": 1.3041,
"step": 28
},
{
"epoch": 0.77,
"learning_rate": 9.944465105459265e-06,
"loss": 1.2894,
"step": 29
},
{
"epoch": 0.79,
"learning_rate": 9.937753414192862e-06,
"loss": 1.3224,
"step": 30
},
{
"epoch": 0.82,
"learning_rate": 9.93066148073606e-06,
"loss": 1.3959,
"step": 31
},
{
"epoch": 0.85,
"learning_rate": 9.923189851218259e-06,
"loss": 1.2608,
"step": 32
},
{
"epoch": 0.87,
"learning_rate": 9.915339101008162e-06,
"loss": 1.2771,
"step": 33
},
{
"epoch": 0.9,
"learning_rate": 9.907109834669465e-06,
"loss": 1.3177,
"step": 34
},
{
"epoch": 0.93,
"learning_rate": 9.898502685914301e-06,
"loss": 1.3932,
"step": 35
},
{
"epoch": 0.95,
"learning_rate": 9.889518317554446e-06,
"loss": 1.4338,
"step": 36
},
{
"epoch": 0.98,
"learning_rate": 9.880157421450269e-06,
"loss": 1.3006,
"step": 37
},
{
"epoch": 1.01,
"learning_rate": 9.870420718457458e-06,
"loss": 1.4079,
"step": 38
},
{
"epoch": 1.03,
"learning_rate": 9.860308958371511e-06,
"loss": 1.3192,
"step": 39
},
{
"epoch": 1.06,
"learning_rate": 9.849822919869993e-06,
"loss": 1.331,
"step": 40
},
{
"epoch": 1.09,
"learning_rate": 9.838963410452572e-06,
"loss": 1.2477,
"step": 41
},
{
"epoch": 1.11,
"learning_rate": 9.827731266378839e-06,
"loss": 1.3087,
"step": 42
},
{
"epoch": 1.14,
"learning_rate": 9.816127352603907e-06,
"loss": 1.2522,
"step": 43
},
{
"epoch": 1.17,
"learning_rate": 9.804152562711804e-06,
"loss": 1.2771,
"step": 44
},
{
"epoch": 1.19,
"learning_rate": 9.791807818846665e-06,
"loss": 1.4044,
"step": 45
},
{
"epoch": 1.22,
"learning_rate": 9.779094071641712e-06,
"loss": 1.2256,
"step": 46
},
{
"epoch": 1.25,
"learning_rate": 9.766012300146058e-06,
"loss": 1.3511,
"step": 47
},
{
"epoch": 1.27,
"learning_rate": 9.752563511749301e-06,
"loss": 1.316,
"step": 48
},
{
"epoch": 1.3,
"learning_rate": 9.738748742103961e-06,
"loss": 1.2006,
"step": 49
},
{
"epoch": 1.32,
"learning_rate": 9.724569055045722e-06,
"loss": 1.2237,
"step": 50
},
{
"epoch": 1.35,
"learning_rate": 9.710025542511503e-06,
"loss": 1.2012,
"step": 51
},
{
"epoch": 1.38,
"learning_rate": 9.695119324455383e-06,
"loss": 1.3153,
"step": 52
},
{
"epoch": 1.4,
"learning_rate": 9.679851548762344e-06,
"loss": 1.2494,
"step": 53
},
{
"epoch": 1.43,
"learning_rate": 9.664223391159885e-06,
"loss": 1.3274,
"step": 54
},
{
"epoch": 1.46,
"learning_rate": 9.648236055127485e-06,
"loss": 1.4895,
"step": 55
},
{
"epoch": 1.48,
"learning_rate": 9.631890771803909e-06,
"loss": 1.2369,
"step": 56
},
{
"epoch": 1.51,
"learning_rate": 9.61518879989242e-06,
"loss": 1.3197,
"step": 57
},
{
"epoch": 1.54,
"learning_rate": 9.598131425563847e-06,
"loss": 1.3453,
"step": 58
},
{
"epoch": 1.56,
"learning_rate": 9.580719962357532e-06,
"loss": 1.3373,
"step": 59
},
{
"epoch": 1.59,
"learning_rate": 9.562955751080183e-06,
"loss": 1.2564,
"step": 60
},
{
"epoch": 1.62,
"learning_rate": 9.544840159702626e-06,
"loss": 1.3595,
"step": 61
},
{
"epoch": 1.64,
"learning_rate": 9.526374583254454e-06,
"loss": 1.3072,
"step": 62
},
{
"epoch": 1.67,
"learning_rate": 9.507560443716608e-06,
"loss": 1.2864,
"step": 63
},
{
"epoch": 1.7,
"learning_rate": 9.488399189911866e-06,
"loss": 1.2683,
"step": 64
},
{
"epoch": 1.72,
"learning_rate": 9.468892297393281e-06,
"loss": 1.3008,
"step": 65
},
{
"epoch": 1.75,
"learning_rate": 9.449041268330549e-06,
"loss": 1.2022,
"step": 66
},
{
"epoch": 1.77,
"learning_rate": 9.428847631394324e-06,
"loss": 1.2575,
"step": 67
},
{
"epoch": 1.8,
"learning_rate": 9.408312941638522e-06,
"loss": 1.2705,
"step": 68
},
{
"epoch": 1.83,
"learning_rate": 9.387438780380541e-06,
"loss": 1.2692,
"step": 69
},
{
"epoch": 1.85,
"learning_rate": 9.366226755079513e-06,
"loss": 1.2362,
"step": 70
},
{
"epoch": 1.88,
"learning_rate": 9.344678499212503e-06,
"loss": 1.2403,
"step": 71
},
{
"epoch": 1.91,
"learning_rate": 9.322795672148726e-06,
"loss": 1.3415,
"step": 72
},
{
"epoch": 1.93,
"learning_rate": 9.300579959021766e-06,
"loss": 1.4069,
"step": 73
},
{
"epoch": 1.96,
"learning_rate": 9.2780330705998e-06,
"loss": 1.2953,
"step": 74
},
{
"epoch": 1.99,
"learning_rate": 9.255156743153867e-06,
"loss": 1.3696,
"step": 75
},
{
"epoch": 2.01,
"learning_rate": 9.231952738324155e-06,
"loss": 1.3844,
"step": 76
},
{
"epoch": 2.04,
"learning_rate": 9.208422842984345e-06,
"loss": 1.277,
"step": 77
},
{
"epoch": 2.07,
"learning_rate": 9.18456886910401e-06,
"loss": 1.2956,
"step": 78
},
{
"epoch": 2.09,
"learning_rate": 9.16039265360908e-06,
"loss": 1.4063,
"step": 79
},
{
"epoch": 2.12,
"learning_rate": 9.135896058240384e-06,
"loss": 1.3085,
"step": 80
},
{
"epoch": 2.15,
"learning_rate": 9.111080969410282e-06,
"loss": 1.3986,
"step": 81
},
{
"epoch": 2.17,
"learning_rate": 9.085949298057402e-06,
"loss": 1.4697,
"step": 82
},
{
"epoch": 2.2,
"learning_rate": 9.060502979499484e-06,
"loss": 1.239,
"step": 83
},
{
"epoch": 2.23,
"learning_rate": 9.034743973284337e-06,
"loss": 1.2664,
"step": 84
},
{
"epoch": 2.25,
"learning_rate": 9.008674263038954e-06,
"loss": 1.3195,
"step": 85
},
{
"epoch": 2.28,
"learning_rate": 8.98229585631675e-06,
"loss": 1.3899,
"step": 86
},
{
"epoch": 2.3,
"learning_rate": 8.955610784442967e-06,
"loss": 1.2493,
"step": 87
},
{
"epoch": 2.33,
"learning_rate": 8.928621102358248e-06,
"loss": 1.2793,
"step": 88
},
{
"epoch": 2.36,
"learning_rate": 8.901328888460394e-06,
"loss": 1.2779,
"step": 89
},
{
"epoch": 2.38,
"learning_rate": 8.873736244444311e-06,
"loss": 1.4215,
"step": 90
},
{
"epoch": 2.41,
"learning_rate": 8.845845295140163e-06,
"loss": 1.2914,
"step": 91
},
{
"epoch": 2.44,
"learning_rate": 8.817658188349745e-06,
"loss": 1.2663,
"step": 92
},
{
"epoch": 2.46,
"learning_rate": 8.789177094681091e-06,
"loss": 1.3403,
"step": 93
},
{
"epoch": 2.49,
"learning_rate": 8.76040420738132e-06,
"loss": 1.301,
"step": 94
},
{
"epoch": 2.52,
"learning_rate": 8.731341742167739e-06,
"loss": 1.2804,
"step": 95
},
{
"epoch": 2.54,
"learning_rate": 8.701991937057211e-06,
"loss": 1.4168,
"step": 96
},
{
"epoch": 2.57,
"learning_rate": 8.672357052193826e-06,
"loss": 1.4377,
"step": 97
},
{
"epoch": 2.6,
"learning_rate": 8.642439369674845e-06,
"loss": 1.272,
"step": 98
},
{
"epoch": 2.62,
"learning_rate": 8.612241193374958e-06,
"loss": 1.2942,
"step": 99
},
{
"epoch": 2.65,
"learning_rate": 8.581764848768878e-06,
"loss": 1.3492,
"step": 100
},
{
"epoch": 2.68,
"learning_rate": 8.551012682752262e-06,
"loss": 1.198,
"step": 101
},
{
"epoch": 2.7,
"learning_rate": 8.519987063460973e-06,
"loss": 1.2902,
"step": 102
},
{
"epoch": 2.73,
"learning_rate": 8.488690380088733e-06,
"loss": 1.2746,
"step": 103
},
{
"epoch": 2.75,
"learning_rate": 8.457125042703124e-06,
"loss": 1.2148,
"step": 104
},
{
"epoch": 2.78,
"learning_rate": 8.425293482060002e-06,
"loss": 1.2625,
"step": 105
},
{
"epoch": 2.81,
"learning_rate": 8.393198149416311e-06,
"loss": 1.4119,
"step": 106
},
{
"epoch": 2.83,
"learning_rate": 8.360841516341319e-06,
"loss": 1.1984,
"step": 107
},
{
"epoch": 2.86,
"learning_rate": 8.328226074526284e-06,
"loss": 1.1971,
"step": 108
},
{
"epoch": 2.89,
"learning_rate": 8.295354335592588e-06,
"loss": 1.1877,
"step": 109
},
{
"epoch": 2.91,
"learning_rate": 8.262228830898313e-06,
"loss": 1.2444,
"step": 110
},
{
"epoch": 2.94,
"learning_rate": 8.22885211134331e-06,
"loss": 1.288,
"step": 111
},
{
"epoch": 2.97,
"learning_rate": 8.19522674717277e-06,
"loss": 1.1986,
"step": 112
},
{
"epoch": 2.99,
"learning_rate": 8.161355327779289e-06,
"loss": 1.3661,
"step": 113
},
{
"epoch": 3.02,
"learning_rate": 8.127240461503462e-06,
"loss": 1.3307,
"step": 114
},
{
"epoch": 3.05,
"learning_rate": 8.092884775433037e-06,
"loss": 1.2406,
"step": 115
},
{
"epoch": 3.07,
"learning_rate": 8.058290915200597e-06,
"loss": 1.3712,
"step": 116
},
{
"epoch": 3.1,
"learning_rate": 8.023461544779833e-06,
"loss": 1.3295,
"step": 117
},
{
"epoch": 3.13,
"learning_rate": 7.988399346280398e-06,
"loss": 1.4179,
"step": 118
},
{
"epoch": 3.15,
"learning_rate": 7.953107019741366e-06,
"loss": 1.3459,
"step": 119
},
{
"epoch": 3.18,
"learning_rate": 7.917587282923312e-06,
"loss": 1.3335,
"step": 120
},
{
"epoch": 3.21,
"learning_rate": 7.88184287109902e-06,
"loss": 1.2718,
"step": 121
},
{
"epoch": 3.23,
"learning_rate": 7.845876536842846e-06,
"loss": 1.2594,
"step": 122
},
{
"epoch": 3.26,
"learning_rate": 7.809691049818766e-06,
"loss": 1.2333,
"step": 123
},
{
"epoch": 3.28,
"learning_rate": 7.773289196567066e-06,
"loss": 1.1418,
"step": 124
},
{
"epoch": 3.31,
"learning_rate": 7.736673780289788e-06,
"loss": 1.3162,
"step": 125
},
{
"epoch": 3.34,
"learning_rate": 7.699847620634834e-06,
"loss": 1.1933,
"step": 126
},
{
"epoch": 3.36,
"learning_rate": 7.662813553478857e-06,
"loss": 1.1409,
"step": 127
},
{
"epoch": 3.39,
"learning_rate": 7.625574430708867e-06,
"loss": 1.1902,
"step": 128
},
{
"epoch": 3.42,
"learning_rate": 7.588133120002612e-06,
"loss": 1.1772,
"step": 129
},
{
"epoch": 3.44,
"learning_rate": 7.5504925046077596e-06,
"loss": 1.1877,
"step": 130
},
{
"epoch": 3.47,
"learning_rate": 7.5126554831198506e-06,
"loss": 1.1943,
"step": 131
},
{
"epoch": 3.5,
"learning_rate": 7.474624969259101e-06,
"loss": 1.3182,
"step": 132
},
{
"epoch": 3.52,
"learning_rate": 7.436403891646014e-06,
"loss": 1.2202,
"step": 133
},
{
"epoch": 3.55,
"learning_rate": 7.3979951935758596e-06,
"loss": 1.4638,
"step": 134
},
{
"epoch": 3.58,
"learning_rate": 7.359401832792019e-06,
"loss": 1.2086,
"step": 135
},
{
"epoch": 3.6,
"learning_rate": 7.32062678125822e-06,
"loss": 1.26,
"step": 136
},
{
"epoch": 3.63,
"learning_rate": 7.281673024929674e-06,
"loss": 1.1927,
"step": 137
},
{
"epoch": 3.66,
"learning_rate": 7.242543563523128e-06,
"loss": 1.3236,
"step": 138
},
{
"epoch": 3.68,
"learning_rate": 7.2032414102858795e-06,
"loss": 1.2632,
"step": 139
},
{
"epoch": 3.71,
"learning_rate": 7.163769591763723e-06,
"loss": 1.2393,
"step": 140
},
{
"epoch": 3.74,
"learning_rate": 7.1241311475678896e-06,
"loss": 1.2769,
"step": 141
},
{
"epoch": 3.76,
"learning_rate": 7.084329130140972e-06,
"loss": 1.177,
"step": 142
},
{
"epoch": 3.79,
"learning_rate": 7.044366604521874e-06,
"loss": 1.3389,
"step": 143
},
{
"epoch": 3.81,
"learning_rate": 7.004246648109765e-06,
"loss": 1.2673,
"step": 144
},
{
"epoch": 3.84,
"learning_rate": 6.963972350427112e-06,
"loss": 1.3116,
"step": 145
},
{
"epoch": 3.87,
"learning_rate": 6.923546812881759e-06,
"loss": 1.1558,
"step": 146
},
{
"epoch": 3.89,
"learning_rate": 6.882973148528096e-06,
"loss": 1.2577,
"step": 147
},
{
"epoch": 3.92,
"learning_rate": 6.8422544818273336e-06,
"loss": 1.1485,
"step": 148
},
{
"epoch": 3.95,
"learning_rate": 6.801393948406894e-06,
"loss": 1.3308,
"step": 149
},
{
"epoch": 3.97,
"learning_rate": 6.760394694818949e-06,
"loss": 1.1488,
"step": 150
},
{
"epoch": 4.0,
"learning_rate": 6.719259878298113e-06,
"loss": 1.2665,
"step": 151
},
{
"epoch": 4.03,
"learning_rate": 6.67799266651831e-06,
"loss": 1.2601,
"step": 152
},
{
"epoch": 4.05,
"learning_rate": 6.63659623734884e-06,
"loss": 1.2757,
"step": 153
},
{
"epoch": 4.08,
"learning_rate": 6.59507377860967e-06,
"loss": 1.3131,
"step": 154
},
{
"epoch": 4.11,
"learning_rate": 6.553428487825933e-06,
"loss": 1.4065,
"step": 155
},
{
"epoch": 4.13,
"learning_rate": 6.511663571981708e-06,
"loss": 1.2555,
"step": 156
},
{
"epoch": 4.16,
"learning_rate": 6.46978224727306e-06,
"loss": 1.2421,
"step": 157
},
{
"epoch": 4.19,
"learning_rate": 6.42778773886036e-06,
"loss": 1.3245,
"step": 158
},
{
"epoch": 4.21,
"learning_rate": 6.3856832806199324e-06,
"loss": 1.3012,
"step": 159
},
{
"epoch": 4.24,
"learning_rate": 6.343472114895022e-06,
"loss": 1.155,
"step": 160
},
{
"epoch": 4.26,
"learning_rate": 6.301157492246111e-06,
"loss": 1.2554,
"step": 161
},
{
"epoch": 4.29,
"learning_rate": 6.2587426712006005e-06,
"loss": 1.348,
"step": 162
},
{
"epoch": 4.32,
"learning_rate": 6.216230918001881e-06,
"loss": 1.2359,
"step": 163
},
{
"epoch": 4.34,
"learning_rate": 6.173625506357814e-06,
"loss": 1.2989,
"step": 164
},
{
"epoch": 4.37,
"learning_rate": 6.1309297171886225e-06,
"loss": 1.2298,
"step": 165
},
{
"epoch": 4.4,
"learning_rate": 6.088146838374247e-06,
"loss": 1.2104,
"step": 166
}
],
"max_steps": 370,
"num_train_epochs": 10,
"total_flos": 1.3814061758152704e+16,
"trial_name": null,
"trial_params": null
}