{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.4851485148514851,
"eval_steps": 20,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 6.5e-06,
"loss": 2.0407,
"step": 1
},
{
"epoch": 0.02,
"learning_rate": 1.3e-05,
"loss": 2.3107,
"step": 2
},
{
"epoch": 0.03,
"learning_rate": 1.9499999999999996e-05,
"loss": 2.1613,
"step": 3
},
{
"epoch": 0.04,
"learning_rate": 2.6e-05,
"loss": 2.0444,
"step": 4
},
{
"epoch": 0.05,
"learning_rate": 3.25e-05,
"loss": 2.2512,
"step": 5
},
{
"epoch": 0.06,
"learning_rate": 3.899999999999999e-05,
"loss": 2.0516,
"step": 6
},
{
"epoch": 0.07,
"learning_rate": 4.5499999999999995e-05,
"loss": 2.2006,
"step": 7
},
{
"epoch": 0.08,
"learning_rate": 5.2e-05,
"loss": 2.1229,
"step": 8
},
{
"epoch": 0.09,
"learning_rate": 5.85e-05,
"loss": 2.2498,
"step": 9
},
{
"epoch": 0.1,
"learning_rate": 6.5e-05,
"loss": 2.0293,
"step": 10
},
{
"epoch": 0.11,
"learning_rate": 6.499564948206075e-05,
"loss": 2.1316,
"step": 11
},
{
"epoch": 0.12,
"learning_rate": 6.498259909298188e-05,
"loss": 2.1274,
"step": 12
},
{
"epoch": 0.13,
"learning_rate": 6.49608523266681e-05,
"loss": 2.1129,
"step": 13
},
{
"epoch": 0.14,
"learning_rate": 6.493041500525461e-05,
"loss": 2.1517,
"step": 14
},
{
"epoch": 0.15,
"learning_rate": 6.489129527754836e-05,
"loss": 2.1078,
"step": 15
},
{
"epoch": 0.16,
"learning_rate": 6.484350361684639e-05,
"loss": 2.0362,
"step": 16
},
{
"epoch": 0.17,
"learning_rate": 6.478705281813194e-05,
"loss": 2.0553,
"step": 17
},
{
"epoch": 0.18,
"learning_rate": 6.472195799464884e-05,
"loss": 2.1223,
"step": 18
},
{
"epoch": 0.19,
"learning_rate": 6.464823657385538e-05,
"loss": 2.0326,
"step": 19
},
{
"epoch": 0.2,
"learning_rate": 6.456590829275857e-05,
"loss": 2.1802,
"step": 20
},
{
"epoch": 0.2,
"eval_loss": 2.012301445007324,
"eval_runtime": 1.4855,
"eval_samples_per_second": 5.385,
"eval_steps_per_second": 0.673,
"step": 20
},
{
"epoch": 0.21,
"learning_rate": 6.447499519263001e-05,
"loss": 2.0665,
"step": 21
},
{
"epoch": 0.22,
"learning_rate": 6.437552161310498e-05,
"loss": 2.0354,
"step": 22
},
{
"epoch": 0.23,
"learning_rate": 6.426751418566609e-05,
"loss": 2.1289,
"step": 23
},
{
"epoch": 0.24,
"learning_rate": 6.415100182651334e-05,
"loss": 2.0742,
"step": 24
},
{
"epoch": 0.25,
"learning_rate": 6.402601572882268e-05,
"loss": 2.0412,
"step": 25
},
{
"epoch": 0.26,
"learning_rate": 6.38925893543947e-05,
"loss": 2.1974,
"step": 26
},
{
"epoch": 0.27,
"learning_rate": 6.375075842469626e-05,
"loss": 2.196,
"step": 27
},
{
"epoch": 0.28,
"learning_rate": 6.360056091129678e-05,
"loss": 2.0775,
"step": 28
},
{
"epoch": 0.29,
"learning_rate": 6.344203702570254e-05,
"loss": 2.0366,
"step": 29
},
{
"epoch": 0.3,
"learning_rate": 6.327522920859093e-05,
"loss": 2.1438,
"step": 30
},
{
"epoch": 0.31,
"learning_rate": 6.310018211844817e-05,
"loss": 2.0639,
"step": 31
},
{
"epoch": 0.32,
"learning_rate": 6.291694261961308e-05,
"loss": 2.1794,
"step": 32
},
{
"epoch": 0.33,
"learning_rate": 6.272555976973039e-05,
"loss": 2.0592,
"step": 33
},
{
"epoch": 0.34,
"learning_rate": 6.252608480661681e-05,
"loss": 2.1628,
"step": 34
},
{
"epoch": 0.35,
"learning_rate": 6.231857113454346e-05,
"loss": 2.1851,
"step": 35
},
{
"epoch": 0.36,
"learning_rate": 6.21030743099382e-05,
"loss": 2.0531,
"step": 36
},
{
"epoch": 0.37,
"learning_rate": 6.18796520265119e-05,
"loss": 2.0284,
"step": 37
},
{
"epoch": 0.38,
"learning_rate": 6.164836409981237e-05,
"loss": 2.2359,
"step": 38
},
{
"epoch": 0.39,
"learning_rate": 6.140927245121032e-05,
"loss": 1.8967,
"step": 39
},
{
"epoch": 0.4,
"learning_rate": 6.116244109132153e-05,
"loss": 2.0275,
"step": 40
},
{
"epoch": 0.4,
"eval_loss": 1.9861761331558228,
"eval_runtime": 1.4896,
"eval_samples_per_second": 5.37,
"eval_steps_per_second": 0.671,
"step": 40
},
{
"epoch": 0.41,
"learning_rate": 6.0907936102869656e-05,
"loss": 2.0316,
"step": 41
},
{
"epoch": 0.42,
"learning_rate": 6.0645825622994254e-05,
"loss": 2.0768,
"step": 42
},
{
"epoch": 0.43,
"learning_rate": 6.037617982500884e-05,
"loss": 2.0058,
"step": 43
},
{
"epoch": 0.44,
"learning_rate": 6.009907089961381e-05,
"loss": 2.2319,
"step": 44
},
{
"epoch": 0.45,
"learning_rate": 5.981457303556923e-05,
"loss": 2.1693,
"step": 45
},
{
"epoch": 0.46,
"learning_rate": 5.9522762399832716e-05,
"loss": 2.0858,
"step": 46
},
{
"epoch": 0.47,
"learning_rate": 5.922371711716768e-05,
"loss": 2.0576,
"step": 47
},
{
"epoch": 0.48,
"learning_rate": 5.891751724922749e-05,
"loss": 1.9946,
"step": 48
},
{
"epoch": 0.49,
"learning_rate": 5.860424477312095e-05,
"loss": 2.0166,
"step": 49
},
{
"epoch": 0.5,
"learning_rate": 5.828398355946514e-05,
"loss": 2.1482,
"step": 50
},
{
"epoch": 0.5,
"learning_rate": 5.795681934993113e-05,
"loss": 2.0698,
"step": 51
},
{
"epoch": 0.51,
"learning_rate": 5.7622839734288945e-05,
"loss": 1.9317,
"step": 52
},
{
"epoch": 0.52,
"learning_rate": 5.728213412695761e-05,
"loss": 2.1399,
"step": 53
},
{
"epoch": 0.53,
"learning_rate": 5.693479374306676e-05,
"loss": 2.1637,
"step": 54
},
{
"epoch": 0.54,
"learning_rate": 5.658091157403617e-05,
"loss": 2.0557,
"step": 55
},
{
"epoch": 0.55,
"learning_rate": 5.622058236267965e-05,
"loss": 1.926,
"step": 56
},
{
"epoch": 0.56,
"learning_rate": 5.585390257784018e-05,
"loss": 2.0984,
"step": 57
},
{
"epoch": 0.57,
"learning_rate": 5.548097038856279e-05,
"loss": 2.0428,
"step": 58
},
{
"epoch": 0.58,
"learning_rate": 5.5101885637812374e-05,
"loss": 1.9624,
"step": 59
},
{
"epoch": 0.59,
"learning_rate": 5.4716749815743304e-05,
"loss": 1.913,
"step": 60
},
{
"epoch": 0.59,
"eval_loss": 1.97765052318573,
"eval_runtime": 1.4886,
"eval_samples_per_second": 5.374,
"eval_steps_per_second": 0.672,
"step": 60
},
{
"epoch": 0.6,
"learning_rate": 5.432566603252809e-05,
"loss": 2.0125,
"step": 61
},
{
"epoch": 0.61,
"learning_rate": 5.3928738990752234e-05,
"loss": 2.0721,
"step": 62
},
{
"epoch": 0.62,
"learning_rate": 5.3526074957382866e-05,
"loss": 2.1046,
"step": 63
},
{
"epoch": 0.63,
"learning_rate": 5.311778173531847e-05,
"loss": 2.0074,
"step": 64
},
{
"epoch": 0.64,
"learning_rate": 5.2703968634527514e-05,
"loss": 2.1667,
"step": 65
},
{
"epoch": 0.65,
"learning_rate": 5.2284746442783414e-05,
"loss": 2.0677,
"step": 66
},
{
"epoch": 0.66,
"learning_rate": 5.186022739600408e-05,
"loss": 2.1798,
"step": 67
},
{
"epoch": 0.67,
"learning_rate": 5.143052514820357e-05,
"loss": 2.105,
"step": 68
},
{
"epoch": 0.68,
"learning_rate": 5.099575474106419e-05,
"loss": 2.118,
"step": 69
},
{
"epoch": 0.69,
"learning_rate": 5.055603257313707e-05,
"loss": 2.0791,
"step": 70
},
{
"epoch": 0.7,
"learning_rate": 5.011147636867943e-05,
"loss": 2.032,
"step": 71
},
{
"epoch": 0.71,
"learning_rate": 4.9662205146136955e-05,
"loss": 1.9331,
"step": 72
},
{
"epoch": 0.72,
"learning_rate": 4.92083391862797e-05,
"loss": 2.0077,
"step": 73
},
{
"epoch": 0.73,
"learning_rate": 4.874999999999999e-05,
"loss": 2.0253,
"step": 74
},
{
"epoch": 0.74,
"learning_rate": 4.828731029578105e-05,
"loss": 1.9922,
"step": 75
},
{
"epoch": 0.75,
"learning_rate": 4.7820393946844926e-05,
"loss": 2.0295,
"step": 76
},
{
"epoch": 0.76,
"learning_rate": 4.734937595798867e-05,
"loss": 1.9514,
"step": 77
},
{
"epoch": 0.77,
"learning_rate": 4.687438243211754e-05,
"loss": 2.1018,
"step": 78
},
{
"epoch": 0.78,
"learning_rate": 4.639554053648416e-05,
"loss": 2.1032,
"step": 79
},
{
"epoch": 0.79,
"learning_rate": 4.5912978468642824e-05,
"loss": 2.1258,
"step": 80
},
{
"epoch": 0.79,
"eval_loss": 1.972991704940796,
"eval_runtime": 1.494,
"eval_samples_per_second": 5.355,
"eval_steps_per_second": 0.669,
"step": 80
},
{
"epoch": 0.8,
"learning_rate": 4.542682542212785e-05,
"loss": 2.0353,
"step": 81
},
{
"epoch": 0.81,
"learning_rate": 4.4937211551865415e-05,
"loss": 1.9987,
"step": 82
},
{
"epoch": 0.82,
"learning_rate": 4.444426793932787e-05,
"loss": 1.8856,
"step": 83
},
{
"epoch": 0.83,
"learning_rate": 4.3948126557440085e-05,
"loss": 2.0904,
"step": 84
},
{
"epoch": 0.84,
"learning_rate": 4.344892023524714e-05,
"loss": 2.0685,
"step": 85
},
{
"epoch": 0.85,
"learning_rate": 4.2946782622352746e-05,
"loss": 2.1,
"step": 86
},
{
"epoch": 0.86,
"learning_rate": 4.244184815313799e-05,
"loss": 1.9322,
"step": 87
},
{
"epoch": 0.87,
"learning_rate": 4.193425201077002e-05,
"loss": 2.194,
"step": 88
},
{
"epoch": 0.88,
"learning_rate": 4.14241300910103e-05,
"loss": 2.0455,
"step": 89
},
{
"epoch": 0.89,
"learning_rate": 4.091161896583192e-05,
"loss": 2.1239,
"step": 90
},
{
"epoch": 0.9,
"learning_rate": 4.0396855846856076e-05,
"loss": 2.142,
"step": 91
},
{
"epoch": 0.91,
"learning_rate": 3.9879978548617125e-05,
"loss": 2.2304,
"step": 92
},
{
"epoch": 0.92,
"learning_rate": 3.936112545166636e-05,
"loss": 1.925,
"step": 93
},
{
"epoch": 0.93,
"learning_rate": 3.884043546552417e-05,
"loss": 2.1547,
"step": 94
},
{
"epoch": 0.94,
"learning_rate": 3.831804799149057e-05,
"loss": 2.0439,
"step": 95
},
{
"epoch": 0.95,
"learning_rate": 3.779410288532413e-05,
"loss": 1.9763,
"step": 96
},
{
"epoch": 0.96,
"learning_rate": 3.726874041979925e-05,
"loss": 2.114,
"step": 97
},
{
"epoch": 0.97,
"learning_rate": 3.674210124715168e-05,
"loss": 2.0293,
"step": 98
},
{
"epoch": 0.98,
"learning_rate": 3.621432636142251e-05,
"loss": 2.0833,
"step": 99
},
{
"epoch": 0.99,
"learning_rate": 3.5685557060710726e-05,
"loss": 2.0452,
"step": 100
},
{
"epoch": 0.99,
"eval_loss": 1.9682413339614868,
"eval_runtime": 1.4934,
"eval_samples_per_second": 5.357,
"eval_steps_per_second": 0.67,
"step": 100
},
{
"epoch": 1.0,
"learning_rate": 3.5155934909344214e-05,
"loss": 1.9682,
"step": 101
},
{
"epoch": 1.01,
"learning_rate": 3.462560169997965e-05,
"loss": 2.0723,
"step": 102
},
{
"epoch": 1.02,
"learning_rate": 3.409469941564109e-05,
"loss": 2.0713,
"step": 103
},
{
"epoch": 1.03,
"learning_rate": 3.356337019170772e-05,
"loss": 2.067,
"step": 104
},
{
"epoch": 1.04,
"learning_rate": 3.303175627786082e-05,
"loss": 1.934,
"step": 105
},
{
"epoch": 1.05,
"learning_rate": 3.25e-05,
"loss": 2.2083,
"step": 106
},
{
"epoch": 1.06,
"learning_rate": 3.1968243722139173e-05,
"loss": 2.045,
"step": 107
},
{
"epoch": 1.07,
"learning_rate": 3.143662980829228e-05,
"loss": 1.9531,
"step": 108
},
{
"epoch": 1.08,
"learning_rate": 3.090530058435891e-05,
"loss": 2.0913,
"step": 109
},
{
"epoch": 1.09,
"learning_rate": 3.0374398300020347e-05,
"loss": 1.9211,
"step": 110
},
{
"epoch": 1.1,
"learning_rate": 2.984406509065579e-05,
"loss": 2.0983,
"step": 111
},
{
"epoch": 1.11,
"learning_rate": 2.9314442939289275e-05,
"loss": 2.1621,
"step": 112
},
{
"epoch": 1.12,
"learning_rate": 2.8785673638577486e-05,
"loss": 2.1728,
"step": 113
},
{
"epoch": 1.13,
"learning_rate": 2.825789875284833e-05,
"loss": 2.0512,
"step": 114
},
{
"epoch": 1.14,
"learning_rate": 2.773125958020074e-05,
"loss": 2.0462,
"step": 115
},
{
"epoch": 1.15,
"learning_rate": 2.7205897114675866e-05,
"loss": 1.9357,
"step": 116
},
{
"epoch": 1.16,
"learning_rate": 2.668195200850944e-05,
"loss": 1.8384,
"step": 117
},
{
"epoch": 1.17,
"learning_rate": 2.6159564534475832e-05,
"loss": 1.9649,
"step": 118
},
{
"epoch": 1.18,
"learning_rate": 2.563887454833363e-05,
"loss": 2.0262,
"step": 119
},
{
"epoch": 1.19,
"learning_rate": 2.5120021451382872e-05,
"loss": 2.1315,
"step": 120
},
{
"epoch": 1.19,
"eval_loss": 1.9639294147491455,
"eval_runtime": 1.4959,
"eval_samples_per_second": 5.348,
"eval_steps_per_second": 0.668,
"step": 120
},
{
"epoch": 1.2,
"learning_rate": 2.4603144153143925e-05,
"loss": 1.9284,
"step": 121
},
{
"epoch": 1.21,
"learning_rate": 2.4088381034168072e-05,
"loss": 2.0671,
"step": 122
},
{
"epoch": 1.22,
"learning_rate": 2.3575869908989705e-05,
"loss": 2.0938,
"step": 123
},
{
"epoch": 1.23,
"learning_rate": 2.306574798922998e-05,
"loss": 1.9189,
"step": 124
},
{
"epoch": 1.24,
"learning_rate": 2.2558151846862005e-05,
"loss": 2.0635,
"step": 125
},
{
"epoch": 1.25,
"learning_rate": 2.2053217377647255e-05,
"loss": 2.1367,
"step": 126
},
{
"epoch": 1.26,
"learning_rate": 2.1551079764752848e-05,
"loss": 1.9967,
"step": 127
},
{
"epoch": 1.27,
"learning_rate": 2.105187344255991e-05,
"loss": 2.1994,
"step": 128
},
{
"epoch": 1.28,
"learning_rate": 2.0555732060672138e-05,
"loss": 2.151,
"step": 129
},
{
"epoch": 1.29,
"learning_rate": 2.0062788448134583e-05,
"loss": 2.0517,
"step": 130
},
{
"epoch": 1.3,
"learning_rate": 1.957317457787214e-05,
"loss": 2.0584,
"step": 131
},
{
"epoch": 1.31,
"learning_rate": 1.908702153135717e-05,
"loss": 2.0868,
"step": 132
},
{
"epoch": 1.32,
"learning_rate": 1.860445946351584e-05,
"loss": 1.9646,
"step": 133
},
{
"epoch": 1.33,
"learning_rate": 1.8125617567882463e-05,
"loss": 1.986,
"step": 134
},
{
"epoch": 1.34,
"learning_rate": 1.7650624042011325e-05,
"loss": 1.9015,
"step": 135
},
{
"epoch": 1.35,
"learning_rate": 1.7179606053155072e-05,
"loss": 2.1968,
"step": 136
},
{
"epoch": 1.36,
"learning_rate": 1.6712689704218944e-05,
"loss": 1.9268,
"step": 137
},
{
"epoch": 1.37,
"learning_rate": 1.6250000000000005e-05,
"loss": 2.0205,
"step": 138
},
{
"epoch": 1.38,
"learning_rate": 1.5791660813720294e-05,
"loss": 2.1183,
"step": 139
},
{
"epoch": 1.39,
"learning_rate": 1.533779485386304e-05,
"loss": 2.08,
"step": 140
},
{
"epoch": 1.39,
"eval_loss": 1.9603089094161987,
"eval_runtime": 1.495,
"eval_samples_per_second": 5.351,
"eval_steps_per_second": 0.669,
"step": 140
},
{
"epoch": 1.4,
"learning_rate": 1.4888523631320579e-05,
"loss": 1.9309,
"step": 141
},
{
"epoch": 1.41,
"learning_rate": 1.4443967426862935e-05,
"loss": 2.0579,
"step": 142
},
{
"epoch": 1.42,
"learning_rate": 1.4004245258935799e-05,
"loss": 1.8377,
"step": 143
},
{
"epoch": 1.43,
"learning_rate": 1.3569474851796432e-05,
"loss": 2.0764,
"step": 144
},
{
"epoch": 1.44,
"learning_rate": 1.3139772603995914e-05,
"loss": 2.1289,
"step": 145
},
{
"epoch": 1.45,
"learning_rate": 1.2715253557216577e-05,
"loss": 2.041,
"step": 146
},
{
"epoch": 1.46,
"learning_rate": 1.2296031365472491e-05,
"loss": 1.9343,
"step": 147
},
{
"epoch": 1.47,
"learning_rate": 1.1882218264681525e-05,
"loss": 1.9665,
"step": 148
},
{
"epoch": 1.48,
"learning_rate": 1.1473925042617137e-05,
"loss": 2.1843,
"step": 149
},
{
"epoch": 1.49,
"learning_rate": 1.1071261009247762e-05,
"loss": 2.0125,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 202,
"num_train_epochs": 2,
"save_steps": 50,
"total_flos": 1.2497211511065805e+17,
"trial_name": null,
"trial_params": null
}