{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 12300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.41,
"grad_norm": 29.442007064819336,
"learning_rate": 4.796747967479675e-05,
"loss": 1.1714,
"step": 500
},
{
"epoch": 0.81,
"grad_norm": 17.30375099182129,
"learning_rate": 4.59349593495935e-05,
"loss": 0.5578,
"step": 1000
},
{
"epoch": 1.22,
"grad_norm": 9.899393081665039,
"learning_rate": 4.390243902439025e-05,
"loss": 0.4015,
"step": 1500
},
{
"epoch": 1.63,
"grad_norm": 8.312630653381348,
"learning_rate": 4.186991869918699e-05,
"loss": 0.3135,
"step": 2000
},
{
"epoch": 2.03,
"grad_norm": 7.51059627532959,
"learning_rate": 3.983739837398374e-05,
"loss": 0.2809,
"step": 2500
},
{
"epoch": 2.44,
"grad_norm": 5.824585914611816,
"learning_rate": 3.780487804878049e-05,
"loss": 0.2045,
"step": 3000
},
{
"epoch": 2.85,
"grad_norm": 20.64141845703125,
"learning_rate": 3.577235772357724e-05,
"loss": 0.1899,
"step": 3500
},
{
"epoch": 3.25,
"grad_norm": 4.098996162414551,
"learning_rate": 3.373983739837399e-05,
"loss": 0.156,
"step": 4000
},
{
"epoch": 3.66,
"grad_norm": 11.682610511779785,
"learning_rate": 3.170731707317073e-05,
"loss": 0.1367,
"step": 4500
},
{
"epoch": 4.07,
"grad_norm": 1.980808138847351,
"learning_rate": 2.9674796747967482e-05,
"loss": 0.1321,
"step": 5000
},
{
"epoch": 4.47,
"grad_norm": 21.24442481994629,
"learning_rate": 2.764227642276423e-05,
"loss": 0.1024,
"step": 5500
},
{
"epoch": 4.88,
"grad_norm": 10.403449058532715,
"learning_rate": 2.5609756097560977e-05,
"loss": 0.0965,
"step": 6000
},
{
"epoch": 5.28,
"grad_norm": 6.274624824523926,
"learning_rate": 2.3577235772357724e-05,
"loss": 0.0811,
"step": 6500
},
{
"epoch": 5.69,
"grad_norm": 18.304744720458984,
"learning_rate": 2.1544715447154475e-05,
"loss": 0.0787,
"step": 7000
},
{
"epoch": 6.1,
"grad_norm": 6.117405891418457,
"learning_rate": 1.9512195121951222e-05,
"loss": 0.0636,
"step": 7500
},
{
"epoch": 6.5,
"grad_norm": 6.909726619720459,
"learning_rate": 1.747967479674797e-05,
"loss": 0.0578,
"step": 8000
},
{
"epoch": 6.91,
"grad_norm": 7.566128253936768,
"learning_rate": 1.5447154471544717e-05,
"loss": 0.0564,
"step": 8500
},
{
"epoch": 7.32,
"grad_norm": 3.8233773708343506,
"learning_rate": 1.3414634146341466e-05,
"loss": 0.0474,
"step": 9000
},
{
"epoch": 7.72,
"grad_norm": 9.988646507263184,
"learning_rate": 1.1382113821138211e-05,
"loss": 0.0394,
"step": 9500
},
{
"epoch": 8.13,
"grad_norm": 7.508295059204102,
"learning_rate": 9.34959349593496e-06,
"loss": 0.0339,
"step": 10000
},
{
"epoch": 8.54,
"grad_norm": 8.255553245544434,
"learning_rate": 7.317073170731707e-06,
"loss": 0.0279,
"step": 10500
},
{
"epoch": 8.94,
"grad_norm": 0.14910294115543365,
"learning_rate": 5.2845528455284555e-06,
"loss": 0.0248,
"step": 11000
},
{
"epoch": 9.35,
"grad_norm": 0.014114579185843468,
"learning_rate": 3.2520325203252037e-06,
"loss": 0.018,
"step": 11500
},
{
"epoch": 9.76,
"grad_norm": 0.12875856459140778,
"learning_rate": 1.2195121951219514e-06,
"loss": 0.0182,
"step": 12000
},
{
"epoch": 10.0,
"step": 12300,
"total_flos": 7.078965077743022e+16,
"train_loss": 0.17480758806554283,
"train_runtime": 299478.6207,
"train_samples_per_second": 1.314,
"train_steps_per_second": 0.041
}
],
"logging_steps": 500,
"max_steps": 12300,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 7.078965077743022e+16,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
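A minimal sketch of how this training state could be inspected, assuming the JSON above is saved as trainer_state.json (the state file written by the Hugging Face Trainer alongside checkpoints). The file path and the use of matplotlib here are assumptions for illustration, not part of the original file.

import json

import matplotlib.pyplot as plt

# Load the trainer state shown above (hypothetical local path).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the final entry in log_history
# is the run summary (train_loss, train_runtime, ...) and has no "loss" key.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]
lrs = [entry["learning_rate"] for entry in logs]

# Plot the training loss and the linear learning-rate decay over steps.
fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()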