|
{ |
|
"best_metric": 0.016997970640659332, |
|
"best_model_checkpoint": "output_qwen_7b_gt_only_new/checkpoint-1850", |
|
"epoch": 4.852459016393443, |
|
"eval_steps": 50, |
|
"global_step": 1850, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.282051282051282e-06, |
|
"loss": 3.7144, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2.564102564102564e-06, |
|
"loss": 3.7383, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 3.846153846153847e-06, |
|
"loss": 3.7315, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 5.128205128205128e-06, |
|
"loss": 3.832, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 6.41025641025641e-06, |
|
"loss": 3.9406, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 7.692307692307694e-06, |
|
"loss": 3.9295, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 8.974358974358976e-06, |
|
"loss": 3.7117, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.0256410256410256e-05, |
|
"loss": 4.1448, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.153846153846154e-05, |
|
"loss": 3.8611, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.282051282051282e-05, |
|
"loss": 3.7229, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.4102564102564104e-05, |
|
"loss": 3.4408, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.5384615384615387e-05, |
|
"loss": 3.3446, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 3.7562, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.794871794871795e-05, |
|
"loss": 3.4758, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.923076923076923e-05, |
|
"loss": 3.6359, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 2.0512820512820512e-05, |
|
"loss": 3.5937, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 2.1794871794871795e-05, |
|
"loss": 3.5099, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 2.307692307692308e-05, |
|
"loss": 3.7363, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 2.435897435897436e-05, |
|
"loss": 3.4163, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 2.564102564102564e-05, |
|
"loss": 3.2308, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 2.6923076923076923e-05, |
|
"loss": 3.316, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 2.8205128205128207e-05, |
|
"loss": 3.0125, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 2.948717948717949e-05, |
|
"loss": 3.1502, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 3.0769230769230774e-05, |
|
"loss": 3.0518, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 3.205128205128206e-05, |
|
"loss": 2.8666, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 2.5435, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 3.461538461538462e-05, |
|
"loss": 2.7219, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 3.58974358974359e-05, |
|
"loss": 2.534, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 3.717948717948718e-05, |
|
"loss": 2.5938, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 3.846153846153846e-05, |
|
"loss": 2.2513, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 3.974358974358974e-05, |
|
"loss": 2.258, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 4.1025641025641023e-05, |
|
"loss": 2.0368, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 4.230769230769231e-05, |
|
"loss": 1.8706, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 4.358974358974359e-05, |
|
"loss": 1.9183, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 4.4871794871794874e-05, |
|
"loss": 1.7306, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 4.615384615384616e-05, |
|
"loss": 1.6126, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 4.7435897435897435e-05, |
|
"loss": 1.5315, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 4.871794871794872e-05, |
|
"loss": 1.3916, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 5e-05, |
|
"loss": 1.3437, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 4.9999991324456964e-05, |
|
"loss": 1.1718, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 4.999996529783388e-05, |
|
"loss": 1.1439, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 4.9999921920148804e-05, |
|
"loss": 1.2322, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 4.999986119143186e-05, |
|
"loss": 1.106, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.999978311172517e-05, |
|
"loss": 1.0669, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.999968768108295e-05, |
|
"loss": 1.0308, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.999957489957142e-05, |
|
"loss": 1.1053, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 4.999944476726885e-05, |
|
"loss": 0.9832, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.999929728426558e-05, |
|
"loss": 0.9761, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.999913245066394e-05, |
|
"loss": 0.933, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.999895026657835e-05, |
|
"loss": 0.7558, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"eval_loss": 0.9146232008934021, |
|
"eval_runtime": 26.9094, |
|
"eval_samples_per_second": 22.297, |
|
"eval_steps_per_second": 2.787, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.9998750732135255e-05, |
|
"loss": 0.8559, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.999853384747313e-05, |
|
"loss": 0.8866, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.9998299612742505e-05, |
|
"loss": 0.9498, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.999804802810596e-05, |
|
"loss": 0.8116, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.999777909373809e-05, |
|
"loss": 0.9047, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.9997492809825556e-05, |
|
"loss": 0.898, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.9997189176567046e-05, |
|
"loss": 0.8543, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.9996868194173306e-05, |
|
"loss": 0.7572, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 4.999652986286711e-05, |
|
"loss": 0.79, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 4.999617418288326e-05, |
|
"loss": 0.7742, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 4.9995801154468624e-05, |
|
"loss": 0.8558, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 4.99954107778821e-05, |
|
"loss": 0.7483, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.999500305339462e-05, |
|
"loss": 0.8833, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.999457798128917e-05, |
|
"loss": 0.7535, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.9994135561860764e-05, |
|
"loss": 0.7344, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 4.999367579541647e-05, |
|
"loss": 0.8373, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 4.9993198682275365e-05, |
|
"loss": 0.6668, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 4.9992704222768606e-05, |
|
"loss": 0.7379, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 4.999219241723937e-05, |
|
"loss": 0.7731, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 4.9991663266042854e-05, |
|
"loss": 0.6745, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 4.999111676954632e-05, |
|
"loss": 0.6995, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 4.999055292812908e-05, |
|
"loss": 0.7747, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 4.9989971742182426e-05, |
|
"loss": 0.7372, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 4.998937321210976e-05, |
|
"loss": 0.7959, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 4.9988757338326474e-05, |
|
"loss": 0.7453, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 4.998812412126001e-05, |
|
"loss": 0.652, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 4.998747356134985e-05, |
|
"loss": 0.7298, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 4.998680565904752e-05, |
|
"loss": 0.7309, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 4.9986120414816565e-05, |
|
"loss": 0.7156, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 4.9985417829132563e-05, |
|
"loss": 0.7668, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 4.998469790248316e-05, |
|
"loss": 0.6943, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 4.9983960635368e-05, |
|
"loss": 0.5747, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 4.998320602829879e-05, |
|
"loss": 0.725, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 4.998243408179925e-05, |
|
"loss": 0.6572, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 4.9981644796405154e-05, |
|
"loss": 0.637, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 4.998083817266429e-05, |
|
"loss": 0.6354, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 4.9980014211136505e-05, |
|
"loss": 0.6864, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 4.997917291239365e-05, |
|
"loss": 0.5963, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 4.9978314277019625e-05, |
|
"loss": 0.6064, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 4.9977438305610366e-05, |
|
"loss": 0.6303, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 4.9976544998773837e-05, |
|
"loss": 0.6072, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 4.997563435713002e-05, |
|
"loss": 0.5951, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 4.997470638131094e-05, |
|
"loss": 0.6087, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 4.997376107196067e-05, |
|
"loss": 0.6461, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 4.997279842973529e-05, |
|
"loss": 0.6479, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 4.99718184553029e-05, |
|
"loss": 0.6044, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 4.997082114934366e-05, |
|
"loss": 0.6382, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 4.996980651254974e-05, |
|
"loss": 0.6603, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 4.9968774545625344e-05, |
|
"loss": 0.5042, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 4.996772524928669e-05, |
|
"loss": 0.6829, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"eval_loss": 0.639582097530365, |
|
"eval_runtime": 27.0017, |
|
"eval_samples_per_second": 22.221, |
|
"eval_steps_per_second": 2.778, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 4.9966658624262044e-05, |
|
"loss": 0.518, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 4.99655746712917e-05, |
|
"loss": 0.6625, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 4.996447339112795e-05, |
|
"loss": 0.5904, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 4.996335478453514e-05, |
|
"loss": 0.6856, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 4.996221885228964e-05, |
|
"loss": 0.5244, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 4.996106559517982e-05, |
|
"loss": 0.581, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 4.99598950140061e-05, |
|
"loss": 0.5233, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 4.99587071095809e-05, |
|
"loss": 0.6757, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 4.9957501882728705e-05, |
|
"loss": 0.6409, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 4.995627933428597e-05, |
|
"loss": 0.6027, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 4.99550394651012e-05, |
|
"loss": 0.576, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 4.9953782276034946e-05, |
|
"loss": 0.5882, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 4.995250776795971e-05, |
|
"loss": 0.5466, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 4.9951215941760075e-05, |
|
"loss": 0.6233, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 4.9949906798332636e-05, |
|
"loss": 0.6017, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 4.9948580338585974e-05, |
|
"loss": 0.6036, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 4.9947236563440726e-05, |
|
"loss": 0.4385, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 4.994587547382952e-05, |
|
"loss": 0.5811, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 4.994449707069702e-05, |
|
"loss": 0.6104, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 4.994310135499989e-05, |
|
"loss": 0.5636, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 4.9941688327706826e-05, |
|
"loss": 0.5811, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 4.9940257989798524e-05, |
|
"loss": 0.5375, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 4.9938810342267695e-05, |
|
"loss": 0.5879, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 4.9937345386119085e-05, |
|
"loss": 0.4887, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 4.9935863122369416e-05, |
|
"loss": 0.5587, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 4.993436355204746e-05, |
|
"loss": 0.5162, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 4.993284667619399e-05, |
|
"loss": 0.5298, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 4.993131249586176e-05, |
|
"loss": 0.57, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 4.992976101211558e-05, |
|
"loss": 0.5001, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 4.992819222603223e-05, |
|
"loss": 0.4982, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 4.992660613870053e-05, |
|
"loss": 0.545, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 4.992500275122128e-05, |
|
"loss": 0.4539, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 4.9923382064707315e-05, |
|
"loss": 0.5473, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 4.992174408028345e-05, |
|
"loss": 0.5759, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 4.992008879908653e-05, |
|
"loss": 0.5563, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 4.991841622226537e-05, |
|
"loss": 0.4783, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 4.991672635098084e-05, |
|
"loss": 0.503, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 4.9915019186405766e-05, |
|
"loss": 0.5458, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 4.991329472972499e-05, |
|
"loss": 0.4992, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 4.991155298213538e-05, |
|
"loss": 0.5645, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 4.990979394484576e-05, |
|
"loss": 0.4729, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 4.9908017619077e-05, |
|
"loss": 0.4912, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 4.990622400606193e-05, |
|
"loss": 0.4898, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 4.99044131070454e-05, |
|
"loss": 0.5889, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 4.9902584923284264e-05, |
|
"loss": 0.4933, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 4.9900739456047345e-05, |
|
"loss": 0.4551, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 4.989887670661549e-05, |
|
"loss": 0.4577, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 4.989699667628152e-05, |
|
"loss": 0.4432, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 4.989509936635026e-05, |
|
"loss": 0.465, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 4.989318477813853e-05, |
|
"loss": 0.4931, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"eval_loss": 0.5148335099220276, |
|
"eval_runtime": 26.8859, |
|
"eval_samples_per_second": 22.317, |
|
"eval_steps_per_second": 2.79, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 4.989125291297513e-05, |
|
"loss": 0.5112, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 4.988930377220087e-05, |
|
"loss": 0.4956, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 4.988733735716852e-05, |
|
"loss": 0.4955, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 4.9885353669242866e-05, |
|
"loss": 0.5444, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 4.988335270980068e-05, |
|
"loss": 0.4508, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 4.9881334480230716e-05, |
|
"loss": 0.4569, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 4.9879298981933694e-05, |
|
"loss": 0.3779, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 4.987724621632236e-05, |
|
"loss": 0.4522, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 4.987517618482142e-05, |
|
"loss": 0.3876, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 4.987308888886755e-05, |
|
"loss": 0.4106, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 4.987098432990943e-05, |
|
"loss": 0.4362, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 4.9868862509407724e-05, |
|
"loss": 0.4347, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 4.9866723428835066e-05, |
|
"loss": 0.4685, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 4.986456708967606e-05, |
|
"loss": 0.5076, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 4.986239349342732e-05, |
|
"loss": 0.4966, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 4.9860202641597384e-05, |
|
"loss": 0.4583, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 4.9857994535706834e-05, |
|
"loss": 0.5343, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 4.985576917728817e-05, |
|
"loss": 0.5018, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 4.985352656788589e-05, |
|
"loss": 0.3912, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 4.985126670905646e-05, |
|
"loss": 0.3894, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 4.984898960236832e-05, |
|
"loss": 0.458, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 4.98466952494019e-05, |
|
"loss": 0.434, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 4.984438365174956e-05, |
|
"loss": 0.4502, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 4.984205481101565e-05, |
|
"loss": 0.3437, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 4.98397087288165e-05, |
|
"loss": 0.4711, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 4.983734540678038e-05, |
|
"loss": 0.4713, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 4.983496484654755e-05, |
|
"loss": 0.4109, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 4.983256704977021e-05, |
|
"loss": 0.3906, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 4.983015201811254e-05, |
|
"loss": 0.3827, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 4.9827719753250675e-05, |
|
"loss": 0.4998, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 4.982527025687271e-05, |
|
"loss": 0.5082, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 4.982280353067872e-05, |
|
"loss": 0.4098, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 4.9820319576380706e-05, |
|
"loss": 0.3899, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 4.9817818395702634e-05, |
|
"loss": 0.4118, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 4.981529999038044e-05, |
|
"loss": 0.4021, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 4.9812764362162e-05, |
|
"loss": 0.4317, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 4.9810211512807166e-05, |
|
"loss": 0.3808, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 4.980764144408771e-05, |
|
"loss": 0.4484, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 4.980505415778738e-05, |
|
"loss": 0.3865, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 4.980244965570186e-05, |
|
"loss": 0.3782, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 4.979982793963879e-05, |
|
"loss": 0.3953, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 4.979718901141776e-05, |
|
"loss": 0.341, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 4.979453287287029e-05, |
|
"loss": 0.4293, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 4.979185952583987e-05, |
|
"loss": 0.3317, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 4.97891689721819e-05, |
|
"loss": 0.5515, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 4.978646121376376e-05, |
|
"loss": 0.4385, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 4.9783736252464744e-05, |
|
"loss": 0.4524, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 4.978099409017609e-05, |
|
"loss": 0.384, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 4.9778234728800975e-05, |
|
"loss": 0.3622, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 4.977545817025453e-05, |
|
"loss": 0.4031, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"eval_loss": 0.4207773804664612, |
|
"eval_runtime": 27.0247, |
|
"eval_samples_per_second": 22.202, |
|
"eval_steps_per_second": 2.775, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 4.97726644164638e-05, |
|
"loss": 0.4301, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 4.976985346936776e-05, |
|
"loss": 0.3188, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 4.976702533091735e-05, |
|
"loss": 0.3839, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 4.97641800030754e-05, |
|
"loss": 0.331, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 4.976131748781671e-05, |
|
"loss": 0.4465, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 4.9758437787127974e-05, |
|
"loss": 0.3405, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 4.975554090300785e-05, |
|
"loss": 0.3813, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 4.9752626837466875e-05, |
|
"loss": 0.343, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 4.9749695592527555e-05, |
|
"loss": 0.4073, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 4.9746747170224306e-05, |
|
"loss": 0.4009, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 4.9743781572603445e-05, |
|
"loss": 0.3842, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 4.9740798801723235e-05, |
|
"loss": 0.3068, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 4.973779885965384e-05, |
|
"loss": 0.3705, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 4.9734781748477355e-05, |
|
"loss": 0.4452, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 4.9731747470287795e-05, |
|
"loss": 0.2932, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 4.972869602719107e-05, |
|
"loss": 0.3706, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 4.972562742130502e-05, |
|
"loss": 0.4109, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 4.972254165475938e-05, |
|
"loss": 0.3886, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 4.971943872969582e-05, |
|
"loss": 0.3941, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 4.9716318648267904e-05, |
|
"loss": 0.3429, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 4.971318141264109e-05, |
|
"loss": 0.3953, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 4.9710027024992775e-05, |
|
"loss": 0.3593, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 4.970685548751222e-05, |
|
"loss": 0.3499, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 4.970366680240063e-05, |
|
"loss": 0.2849, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 4.9700460971871076e-05, |
|
"loss": 0.3127, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 4.969723799814855e-05, |
|
"loss": 0.3821, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 4.969399788346994e-05, |
|
"loss": 0.3157, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 4.969074063008402e-05, |
|
"loss": 0.4177, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 4.9687466240251465e-05, |
|
"loss": 0.3918, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 4.968417471624485e-05, |
|
"loss": 0.3505, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 4.968086606034862e-05, |
|
"loss": 0.3169, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 4.9677540274859145e-05, |
|
"loss": 0.3477, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 4.967419736208466e-05, |
|
"loss": 0.3797, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 4.967083732434529e-05, |
|
"loss": 0.2928, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 4.966746016397303e-05, |
|
"loss": 0.3138, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 4.966406588331181e-05, |
|
"loss": 0.3771, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 4.966065448471739e-05, |
|
"loss": 0.3108, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 4.965722597055742e-05, |
|
"loss": 0.3732, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 4.965378034321144e-05, |
|
"loss": 0.3365, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 4.9650317605070886e-05, |
|
"loss": 0.333, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 4.9646837758539026e-05, |
|
"loss": 0.36, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 4.964334080603103e-05, |
|
"loss": 0.2805, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 4.963982674997395e-05, |
|
"loss": 0.3268, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 4.963629559280668e-05, |
|
"loss": 0.3853, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 4.963274733697999e-05, |
|
"loss": 0.3204, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 4.962918198495654e-05, |
|
"loss": 0.3035, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 4.9625599539210825e-05, |
|
"loss": 0.2801, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 4.962200000222923e-05, |
|
"loss": 0.2999, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 4.961838337650997e-05, |
|
"loss": 0.2818, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 4.961474966456317e-05, |
|
"loss": 0.3822, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"eval_loss": 0.3398887515068054, |
|
"eval_runtime": 26.9438, |
|
"eval_samples_per_second": 22.269, |
|
"eval_steps_per_second": 2.784, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 4.961109886891076e-05, |
|
"loss": 0.2777, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 4.9607430992086564e-05, |
|
"loss": 0.2942, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 4.960374603663624e-05, |
|
"loss": 0.27, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 4.9600044005117305e-05, |
|
"loss": 0.3825, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 4.9596324900099145e-05, |
|
"loss": 0.2462, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 4.959258872416296e-05, |
|
"loss": 0.3602, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 4.958883547990184e-05, |
|
"loss": 0.3178, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 4.958506516992069e-05, |
|
"loss": 0.2902, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 4.958127779683625e-05, |
|
"loss": 0.2936, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 4.957747336327715e-05, |
|
"loss": 0.3336, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 4.957365187188382e-05, |
|
"loss": 0.3285, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 4.956981332530854e-05, |
|
"loss": 0.2905, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 4.956595772621543e-05, |
|
"loss": 0.2634, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 4.9562085077280443e-05, |
|
"loss": 0.2824, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.9558195381191364e-05, |
|
"loss": 0.23, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.9554288640647815e-05, |
|
"loss": 0.3001, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.955036485836124e-05, |
|
"loss": 0.3123, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.954642403705492e-05, |
|
"loss": 0.2238, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 4.9542466179463956e-05, |
|
"loss": 0.2913, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 4.953849128833526e-05, |
|
"loss": 0.38, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 4.953449936642759e-05, |
|
"loss": 0.269, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 4.953049041651151e-05, |
|
"loss": 0.3593, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 4.952646444136942e-05, |
|
"loss": 0.2931, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 4.9522421443795496e-05, |
|
"loss": 0.2661, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 4.951836142659577e-05, |
|
"loss": 0.2888, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 4.951428439258806e-05, |
|
"loss": 0.2596, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 4.951019034460202e-05, |
|
"loss": 0.2613, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 4.9506079285479085e-05, |
|
"loss": 0.287, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 4.950195121807251e-05, |
|
"loss": 0.3136, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 4.949780614524736e-05, |
|
"loss": 0.3299, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 4.949364406988049e-05, |
|
"loss": 0.3073, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 4.948946499486056e-05, |
|
"loss": 0.2867, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 4.948526892308803e-05, |
|
"loss": 0.297, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 4.948105585747517e-05, |
|
"loss": 0.244, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 4.947682580094601e-05, |
|
"loss": 0.278, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 4.94725787564364e-05, |
|
"loss": 0.2499, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 4.9468314726893975e-05, |
|
"loss": 0.2954, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 4.9464033715278154e-05, |
|
"loss": 0.2678, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 4.9459735724560154e-05, |
|
"loss": 0.2308, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 4.945542075772296e-05, |
|
"loss": 0.2117, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 4.945108881776135e-05, |
|
"loss": 0.2704, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 4.944673990768187e-05, |
|
"loss": 0.2982, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 4.944237403050286e-05, |
|
"loss": 0.296, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 4.943799118925443e-05, |
|
"loss": 0.2265, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 4.943359138697845e-05, |
|
"loss": 0.33, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 4.942917462672859e-05, |
|
"loss": 0.2858, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 4.942474091157027e-05, |
|
"loss": 0.244, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 4.9420290244580666e-05, |
|
"loss": 0.2833, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 4.9415822628848755e-05, |
|
"loss": 0.2337, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 4.9411338067475244e-05, |
|
"loss": 0.2662, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"eval_loss": 0.27772608399391174, |
|
"eval_runtime": 27.1665, |
|
"eval_samples_per_second": 22.086, |
|
"eval_steps_per_second": 2.761, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 4.940683656357262e-05, |
|
"loss": 0.2667, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 4.9402318120265115e-05, |
|
"loss": 0.2497, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 4.9397782740688734e-05, |
|
"loss": 0.2302, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 4.939323042799121e-05, |
|
"loss": 0.246, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 4.9388661185332066e-05, |
|
"loss": 0.2551, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 4.9384075015882544e-05, |
|
"loss": 0.2779, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 4.9379471922825656e-05, |
|
"loss": 0.2132, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 4.9374851909356134e-05, |
|
"loss": 0.2284, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 4.937021497868047e-05, |
|
"loss": 0.241, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 4.936556113401691e-05, |
|
"loss": 0.2694, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 4.9360890378595406e-05, |
|
"loss": 0.2252, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 4.9356202715657675e-05, |
|
"loss": 0.26, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 4.935149814845715e-05, |
|
"loss": 0.2273, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 4.934677668025902e-05, |
|
"loss": 0.2259, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 4.934203831434019e-05, |
|
"loss": 0.2122, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 4.933728305398927e-05, |
|
"loss": 0.2042, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 4.933251090250663e-05, |
|
"loss": 0.2797, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 4.9327721863204354e-05, |
|
"loss": 0.235, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 4.9322915939406236e-05, |
|
"loss": 0.2502, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 4.9318093134447804e-05, |
|
"loss": 0.2573, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 4.9313253451676286e-05, |
|
"loss": 0.254, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 4.930839689445064e-05, |
|
"loss": 0.2346, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 4.930352346614152e-05, |
|
"loss": 0.2592, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 4.9298633170131304e-05, |
|
"loss": 0.2032, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 4.9293726009814066e-05, |
|
"loss": 0.2311, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 4.928880198859558e-05, |
|
"loss": 0.2085, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 4.9283861109893354e-05, |
|
"loss": 0.2458, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 4.927890337713655e-05, |
|
"loss": 0.2159, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 4.927392879376605e-05, |
|
"loss": 0.2127, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 4.926893736323446e-05, |
|
"loss": 0.2244, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 4.926392908900601e-05, |
|
"loss": 0.1844, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 4.925890397455668e-05, |
|
"loss": 0.1856, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 4.9253862023374134e-05, |
|
"loss": 0.1896, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 4.924880323895766e-05, |
|
"loss": 0.2515, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 4.9243727624818324e-05, |
|
"loss": 0.2378, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 4.9238635184478784e-05, |
|
"loss": 0.2079, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 4.9233525921473446e-05, |
|
"loss": 0.1777, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 4.922839983934834e-05, |
|
"loss": 0.2403, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 4.922325694166119e-05, |
|
"loss": 0.1945, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 4.92180972319814e-05, |
|
"loss": 0.2083, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 4.921292071389002e-05, |
|
"loss": 0.1945, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 4.92077273909798e-05, |
|
"loss": 0.1997, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 4.920251726685511e-05, |
|
"loss": 0.2585, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 4.9197290345132014e-05, |
|
"loss": 0.217, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 4.919204662943822e-05, |
|
"loss": 0.2329, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 4.918678612341309e-05, |
|
"loss": 0.2231, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 4.9181508830707646e-05, |
|
"loss": 0.1705, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 4.9176214754984566e-05, |
|
"loss": 0.191, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 4.917090389991816e-05, |
|
"loss": 0.2344, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 4.91655762691944e-05, |
|
"loss": 0.1979, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"eval_loss": 0.22052711248397827, |
|
"eval_runtime": 26.9339, |
|
"eval_samples_per_second": 22.277, |
|
"eval_steps_per_second": 2.785, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 4.9160231866510885e-05, |
|
"loss": 0.2315, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 4.915487069557686e-05, |
|
"loss": 0.2132, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 4.9149492760113224e-05, |
|
"loss": 0.1852, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 4.9144098063852485e-05, |
|
"loss": 0.1515, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 4.913868661053881e-05, |
|
"loss": 0.2082, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 4.913325840392796e-05, |
|
"loss": 0.2033, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 4.912781344778737e-05, |
|
"loss": 0.2223, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 4.912235174589607e-05, |
|
"loss": 0.2051, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 4.911687330204471e-05, |
|
"loss": 0.1809, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 4.9111378120035565e-05, |
|
"loss": 0.2251, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 4.910586620368255e-05, |
|
"loss": 0.1981, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 4.910033755681116e-05, |
|
"loss": 0.173, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 4.909479218325852e-05, |
|
"loss": 0.211, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 4.908923008687335e-05, |
|
"loss": 0.1992, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 4.908365127151601e-05, |
|
"loss": 0.1609, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 4.9078055741058416e-05, |
|
"loss": 0.201, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 4.9072443499384116e-05, |
|
"loss": 0.2366, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 4.9066814550388254e-05, |
|
"loss": 0.1868, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 4.9061168897977564e-05, |
|
"loss": 0.1688, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 4.905550654607038e-05, |
|
"loss": 0.167, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 4.9049827498596606e-05, |
|
"loss": 0.2089, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.9044131759497755e-05, |
|
"loss": 0.1676, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.903841933272693e-05, |
|
"loss": 0.1566, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.9032690222248775e-05, |
|
"loss": 0.1515, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.902694443203957e-05, |
|
"loss": 0.1876, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 4.902118196608711e-05, |
|
"loss": 0.2092, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 4.901540282839085e-05, |
|
"loss": 0.151, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 4.90096070229617e-05, |
|
"loss": 0.1907, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 4.9003794553822244e-05, |
|
"loss": 0.1577, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 4.899796542500657e-05, |
|
"loss": 0.168, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 4.899211964056036e-05, |
|
"loss": 0.2188, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 4.898625720454083e-05, |
|
"loss": 0.1593, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 4.8980378121016765e-05, |
|
"loss": 0.1225, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 4.8974482394068514e-05, |
|
"loss": 0.1646, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 4.8968570027787955e-05, |
|
"loss": 0.1843, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 4.8962641026278536e-05, |
|
"loss": 0.1291, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 4.895669539365525e-05, |
|
"loss": 0.1243, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 4.89507331340446e-05, |
|
"loss": 0.125, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 4.8944754251584676e-05, |
|
"loss": 0.114, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 4.893875875042507e-05, |
|
"loss": 0.1571, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 4.893274663472692e-05, |
|
"loss": 0.1226, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 4.892671790866291e-05, |
|
"loss": 0.1415, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 4.8920672576417214e-05, |
|
"loss": 0.1283, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 4.891461064218558e-05, |
|
"loss": 0.1273, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 4.890853211017523e-05, |
|
"loss": 0.1068, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 4.890243698460495e-05, |
|
"loss": 0.1221, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 4.889632526970501e-05, |
|
"loss": 0.1485, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 4.889019696971721e-05, |
|
"loss": 0.1474, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 4.888405208889486e-05, |
|
"loss": 0.1306, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 4.887789063150276e-05, |
|
"loss": 0.1329, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"eval_loss": 0.16800016164779663, |
|
"eval_runtime": 26.9666, |
|
"eval_samples_per_second": 22.25, |
|
"eval_steps_per_second": 2.781, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 4.887171260181724e-05, |
|
"loss": 0.1142, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 4.886551800412612e-05, |
|
"loss": 0.1273, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 4.8859306842728724e-05, |
|
"loss": 0.1353, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 4.8853079121935865e-05, |
|
"loss": 0.1464, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 4.8846834846069845e-05, |
|
"loss": 0.1229, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 4.884057401946446e-05, |
|
"loss": 0.127, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 4.8834296646465015e-05, |
|
"loss": 0.1538, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 4.882800273142827e-05, |
|
"loss": 0.1199, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 4.8821692278722465e-05, |
|
"loss": 0.1144, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 4.881536529272734e-05, |
|
"loss": 0.109, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 4.880902177783409e-05, |
|
"loss": 0.1064, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 4.8802661738445396e-05, |
|
"loss": 0.121, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 4.879628517897541e-05, |
|
"loss": 0.117, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 4.878989210384972e-05, |
|
"loss": 0.1178, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 4.878348251750542e-05, |
|
"loss": 0.1234, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 4.877705642439103e-05, |
|
"loss": 0.1497, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 4.8770613828966526e-05, |
|
"loss": 0.1085, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 4.876415473570337e-05, |
|
"loss": 0.1063, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 4.875767914908445e-05, |
|
"loss": 0.0911, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 4.87511870736041e-05, |
|
"loss": 0.0866, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 4.874467851376809e-05, |
|
"loss": 0.1127, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 4.873815347409367e-05, |
|
"loss": 0.1103, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 4.8731611959109477e-05, |
|
"loss": 0.111, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 4.8725053973355616e-05, |
|
"loss": 0.1244, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 4.871847952138362e-05, |
|
"loss": 0.137, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 4.871188860775643e-05, |
|
"loss": 0.1005, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 4.8705281237048436e-05, |
|
"loss": 0.0917, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 4.869865741384544e-05, |
|
"loss": 0.1539, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 4.869201714274467e-05, |
|
"loss": 0.112, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 4.868536042835474e-05, |
|
"loss": 0.1237, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 4.867868727529573e-05, |
|
"loss": 0.0852, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 4.867199768819907e-05, |
|
"loss": 0.1352, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 4.866529167170764e-05, |
|
"loss": 0.1161, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 4.8658569230475704e-05, |
|
"loss": 0.1532, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 4.8651830369168925e-05, |
|
"loss": 0.0956, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 4.8645075092464364e-05, |
|
"loss": 0.123, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 4.863830340505048e-05, |
|
"loss": 0.0959, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 4.863151531162712e-05, |
|
"loss": 0.0932, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 4.862471081690552e-05, |
|
"loss": 0.1418, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 4.861788992560828e-05, |
|
"loss": 0.1089, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 4.86110526424694e-05, |
|
"loss": 0.1027, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 4.860419897223427e-05, |
|
"loss": 0.0984, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 4.8597328919659605e-05, |
|
"loss": 0.1141, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 4.8590442489513543e-05, |
|
"loss": 0.104, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 4.858353968657555e-05, |
|
"loss": 0.1402, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 4.857662051563649e-05, |
|
"loss": 0.1181, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 4.856968498149855e-05, |
|
"loss": 0.0688, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 4.85627330889753e-05, |
|
"loss": 0.1549, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 4.855576484289166e-05, |
|
"loss": 0.1017, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 4.854878024808388e-05, |
|
"loss": 0.0746, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"eval_loss": 0.1376887559890747, |
|
"eval_runtime": 27.1573, |
|
"eval_samples_per_second": 22.094, |
|
"eval_steps_per_second": 2.762, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 4.8541779309399586e-05, |
|
"loss": 0.1044, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 4.853476203169773e-05, |
|
"loss": 0.1151, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 4.852772841984861e-05, |
|
"loss": 0.1172, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 4.852067847873385e-05, |
|
"loss": 0.1063, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 4.851361221324643e-05, |
|
"loss": 0.0837, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 4.8506529628290625e-05, |
|
"loss": 0.1099, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 4.8499430728782066e-05, |
|
"loss": 0.0857, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 4.849231551964771e-05, |
|
"loss": 0.0893, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 4.8485184005825815e-05, |
|
"loss": 0.1143, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 4.847803619226594e-05, |
|
"loss": 0.1008, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 4.8470872083929005e-05, |
|
"loss": 0.0968, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 4.8463691685787205e-05, |
|
"loss": 0.1305, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 4.8456495002824034e-05, |
|
"loss": 0.1634, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 4.844928204003433e-05, |
|
"loss": 0.091, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 4.844205280242418e-05, |
|
"loss": 0.0934, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 4.843480729501099e-05, |
|
"loss": 0.0847, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 4.842754552282347e-05, |
|
"loss": 0.0897, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 4.8420267490901595e-05, |
|
"loss": 0.0928, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 4.841297320429664e-05, |
|
"loss": 0.0844, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 4.840566266807115e-05, |
|
"loss": 0.1003, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 4.8398335887298965e-05, |
|
"loss": 0.0999, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 4.839099286706519e-05, |
|
"loss": 0.0724, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 4.8383633612466186e-05, |
|
"loss": 0.0851, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 4.837625812860961e-05, |
|
"loss": 0.0983, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 4.836886642061435e-05, |
|
"loss": 0.1012, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 4.83614584936106e-05, |
|
"loss": 0.1237, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 4.8354034352739754e-05, |
|
"loss": 0.0622, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 4.834659400315451e-05, |
|
"loss": 0.0703, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 4.833913745001878e-05, |
|
"loss": 0.0912, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 4.833166469850775e-05, |
|
"loss": 0.1437, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 4.8324175753807816e-05, |
|
"loss": 0.1134, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 4.8316670621116645e-05, |
|
"loss": 0.0911, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 4.830914930564312e-05, |
|
"loss": 0.0974, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 4.830161181260736e-05, |
|
"loss": 0.1338, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 4.829405814724072e-05, |
|
"loss": 0.1153, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 4.828648831478576e-05, |
|
"loss": 0.0677, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 4.8278902320496276e-05, |
|
"loss": 0.1033, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 4.827130016963729e-05, |
|
"loss": 0.12, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 4.8263681867485e-05, |
|
"loss": 0.1481, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 4.825604741932687e-05, |
|
"loss": 0.0802, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 4.824839683046152e-05, |
|
"loss": 0.0675, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 4.8240730106198784e-05, |
|
"loss": 0.0738, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 4.8233047251859716e-05, |
|
"loss": 0.0777, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 4.8225348272776544e-05, |
|
"loss": 0.0782, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 4.8217633174292697e-05, |
|
"loss": 0.1094, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 4.820990196176279e-05, |
|
"loss": 0.0894, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 4.820215464055261e-05, |
|
"loss": 0.0893, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 4.819439121603915e-05, |
|
"loss": 0.0912, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 4.818661169361055e-05, |
|
"loss": 0.065, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 4.817881607866614e-05, |
|
"loss": 0.0898, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"eval_loss": 0.11340118199586868, |
|
"eval_runtime": 27.0787, |
|
"eval_samples_per_second": 22.158, |
|
"eval_steps_per_second": 2.77, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 4.817100437661643e-05, |
|
"loss": 0.0784, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 4.8163176592883064e-05, |
|
"loss": 0.0954, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 4.815533273289886e-05, |
|
"loss": 0.0851, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 4.814747280210782e-05, |
|
"loss": 0.0842, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 4.8139596805965054e-05, |
|
"loss": 0.1074, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 4.813170474993686e-05, |
|
"loss": 0.0737, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 4.812379663950066e-05, |
|
"loss": 0.0892, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 4.811587248014503e-05, |
|
"loss": 0.0909, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 4.8107932277369675e-05, |
|
"loss": 0.078, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 4.809997603668545e-05, |
|
"loss": 0.0695, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 4.8092003763614325e-05, |
|
"loss": 0.0712, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 4.80840154636894e-05, |
|
"loss": 0.0866, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 4.8076011142454925e-05, |
|
"loss": 0.0918, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 4.8067990805466215e-05, |
|
"loss": 0.0867, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 4.8059954458289756e-05, |
|
"loss": 0.0913, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 4.805190210650311e-05, |
|
"loss": 0.1037, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 4.804383375569496e-05, |
|
"loss": 0.0726, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 4.803574941146509e-05, |
|
"loss": 0.0797, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 4.80276490794244e-05, |
|
"loss": 0.0836, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 4.801953276519486e-05, |
|
"loss": 0.0806, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 4.801140047440954e-05, |
|
"loss": 0.0705, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 4.8003252212712616e-05, |
|
"loss": 0.0963, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 4.7995087985759334e-05, |
|
"loss": 0.0649, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 4.798690779921601e-05, |
|
"loss": 0.0743, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 4.7978711658760066e-05, |
|
"loss": 0.0767, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 4.797049957007996e-05, |
|
"loss": 0.0983, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 4.796227153887526e-05, |
|
"loss": 0.0788, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 4.7954027570856556e-05, |
|
"loss": 0.0864, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 4.794576767174553e-05, |
|
"loss": 0.0839, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 4.7937491847274916e-05, |
|
"loss": 0.0835, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 4.792920010318849e-05, |
|
"loss": 0.0833, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 4.792089244524108e-05, |
|
"loss": 0.0795, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 4.7912568879198575e-05, |
|
"loss": 0.07, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 4.790422941083786e-05, |
|
"loss": 0.0625, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 4.789587404594693e-05, |
|
"loss": 0.0599, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 4.788750279032473e-05, |
|
"loss": 0.0804, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 4.787911564978132e-05, |
|
"loss": 0.0719, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 4.7870712630137695e-05, |
|
"loss": 0.0592, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 4.786229373722595e-05, |
|
"loss": 0.0473, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 4.785385897688914e-05, |
|
"loss": 0.0767, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 4.784540835498136e-05, |
|
"loss": 0.076, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 4.7836941877367726e-05, |
|
"loss": 0.0825, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 4.7828459549924315e-05, |
|
"loss": 0.0579, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 4.7819961378538244e-05, |
|
"loss": 0.0569, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 4.781144736910762e-05, |
|
"loss": 0.0624, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 4.780291752754153e-05, |
|
"loss": 0.087, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 4.779437185976004e-05, |
|
"loss": 0.0871, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 4.778581037169424e-05, |
|
"loss": 0.0889, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 4.7777233069286154e-05, |
|
"loss": 0.0688, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 4.776863995848882e-05, |
|
"loss": 0.0607, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"eval_loss": 0.09115689992904663, |
|
"eval_runtime": 27.0579, |
|
"eval_samples_per_second": 22.175, |
|
"eval_steps_per_second": 2.772, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 4.776003104526621e-05, |
|
"loss": 0.0763, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 4.775140633559329e-05, |
|
"loss": 0.06, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 4.7742765835455996e-05, |
|
"loss": 0.0565, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 4.77341095508512e-05, |
|
"loss": 0.0654, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 4.772543748778674e-05, |
|
"loss": 0.0764, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 4.7716749652281415e-05, |
|
"loss": 0.084, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 4.770804605036494e-05, |
|
"loss": 0.0475, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 4.769932668807801e-05, |
|
"loss": 0.0681, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 4.7690591571472234e-05, |
|
"loss": 0.062, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 4.768184070661016e-05, |
|
"loss": 0.0718, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 4.7673074099565286e-05, |
|
"loss": 0.0505, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 4.766429175642198e-05, |
|
"loss": 0.0811, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 4.765549368327562e-05, |
|
"loss": 0.0587, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 4.7646679886232414e-05, |
|
"loss": 0.0511, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 4.763785037140953e-05, |
|
"loss": 0.0622, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 4.7629005144935036e-05, |
|
"loss": 0.0623, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 4.762014421294791e-05, |
|
"loss": 0.0466, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 4.761126758159801e-05, |
|
"loss": 0.0743, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 4.760237525704611e-05, |
|
"loss": 0.08, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 4.759346724546386e-05, |
|
"loss": 0.047, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 4.758454355303383e-05, |
|
"loss": 0.0501, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 4.757560418594944e-05, |
|
"loss": 0.0524, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 4.7566649150415e-05, |
|
"loss": 0.0496, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 4.755767845264568e-05, |
|
"loss": 0.0538, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 4.7548692098867555e-05, |
|
"loss": 0.0828, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 4.7539690095317526e-05, |
|
"loss": 0.056, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 4.753067244824338e-05, |
|
"loss": 0.0577, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 4.7521639163903765e-05, |
|
"loss": 0.058, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 4.7512590248568163e-05, |
|
"loss": 0.0668, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 4.7503525708516916e-05, |
|
"loss": 0.0605, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 4.7494445550041214e-05, |
|
"loss": 0.0567, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 4.748534977944308e-05, |
|
"loss": 0.0617, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 4.747623840303537e-05, |
|
"loss": 0.0588, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 4.7467111427141776e-05, |
|
"loss": 0.0873, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 4.7457968858096815e-05, |
|
"loss": 0.0478, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 4.744881070224583e-05, |
|
"loss": 0.0656, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 4.743963696594498e-05, |
|
"loss": 0.0587, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 4.743044765556123e-05, |
|
"loss": 0.0546, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 4.7421242777472366e-05, |
|
"loss": 0.0895, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 4.7412022338066976e-05, |
|
"loss": 0.0667, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 4.7402786343744437e-05, |
|
"loss": 0.0581, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 4.739353480091494e-05, |
|
"loss": 0.0799, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 4.738426771599945e-05, |
|
"loss": 0.0617, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 4.7374985095429725e-05, |
|
"loss": 0.0631, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 4.736568694564831e-05, |
|
"loss": 0.0639, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 4.7356373273108534e-05, |
|
"loss": 0.0571, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 4.7347044084274486e-05, |
|
"loss": 0.0552, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 4.733769938562101e-05, |
|
"loss": 0.059, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 4.7328339183633753e-05, |
|
"loss": 0.0601, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 4.7318963484809097e-05, |
|
"loss": 0.0491, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"eval_loss": 0.07235535979270935, |
|
"eval_runtime": 26.886, |
|
"eval_samples_per_second": 22.316, |
|
"eval_steps_per_second": 2.79, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 4.730957229565419e-05, |
|
"loss": 0.0739, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 4.730016562268691e-05, |
|
"loss": 0.0543, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 4.729074347243591e-05, |
|
"loss": 0.0732, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 4.728130585144056e-05, |
|
"loss": 0.0597, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 4.727185276625099e-05, |
|
"loss": 0.0596, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 4.726238422342805e-05, |
|
"loss": 0.0682, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 4.7252900229543316e-05, |
|
"loss": 0.0577, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 4.7243400791179096e-05, |
|
"loss": 0.0789, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 4.723388591492841e-05, |
|
"loss": 0.0431, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 4.7224355607395e-05, |
|
"loss": 0.061, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 4.72148098751933e-05, |
|
"loss": 0.032, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 4.720524872494848e-05, |
|
"loss": 0.0622, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 4.719567216329638e-05, |
|
"loss": 0.0569, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 4.718608019688356e-05, |
|
"loss": 0.0516, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 4.717647283236726e-05, |
|
"loss": 0.0483, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 4.7166850076415396e-05, |
|
"loss": 0.0463, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 4.715721193570659e-05, |
|
"loss": 0.053, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 4.7147558416930136e-05, |
|
"loss": 0.0491, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 4.713788952678598e-05, |
|
"loss": 0.0486, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 4.7128205271984774e-05, |
|
"loss": 0.0393, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 4.711850565924778e-05, |
|
"loss": 0.0655, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 4.710879069530698e-05, |
|
"loss": 0.0632, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 4.709906038690496e-05, |
|
"loss": 0.0408, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 4.708931474079499e-05, |
|
"loss": 0.0829, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 4.7079553763740966e-05, |
|
"loss": 0.039, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 4.706977746251743e-05, |
|
"loss": 0.0502, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 4.705998584390957e-05, |
|
"loss": 0.0364, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 4.7050178914713176e-05, |
|
"loss": 0.0529, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 4.7040356681734695e-05, |
|
"loss": 0.0433, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 4.7030519151791186e-05, |
|
"loss": 0.0473, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 4.7020666331710315e-05, |
|
"loss": 0.0482, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 4.701079822833037e-05, |
|
"loss": 0.0467, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 4.700091484850024e-05, |
|
"loss": 0.0358, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 4.699101619907943e-05, |
|
"loss": 0.0364, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 4.698110228693802e-05, |
|
"loss": 0.0474, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 4.69711731189567e-05, |
|
"loss": 0.0713, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 4.696122870202675e-05, |
|
"loss": 0.0589, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 4.695126904305002e-05, |
|
"loss": 0.054, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 4.6941294148938954e-05, |
|
"loss": 0.0379, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 4.6931304026616553e-05, |
|
"loss": 0.0516, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 4.692129868301639e-05, |
|
"loss": 0.0369, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 4.691127812508263e-05, |
|
"loss": 0.0343, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 4.690124235976996e-05, |
|
"loss": 0.0343, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 4.689119139404364e-05, |
|
"loss": 0.0509, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 4.6881125234879474e-05, |
|
"loss": 0.0343, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 4.6871043889263825e-05, |
|
"loss": 0.0469, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 4.686094736419357e-05, |
|
"loss": 0.027, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 4.6850835666676144e-05, |
|
"loss": 0.0367, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 4.684070880372951e-05, |
|
"loss": 0.0526, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 4.683056678238213e-05, |
|
"loss": 0.0429, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"eval_loss": 0.06248383969068527, |
|
"eval_runtime": 27.0153, |
|
"eval_samples_per_second": 22.21, |
|
"eval_steps_per_second": 2.776, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 4.682040960967303e-05, |
|
"loss": 0.0452, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 4.6810237292651715e-05, |
|
"loss": 0.054, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 4.680004983837823e-05, |
|
"loss": 0.0371, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 4.678984725392309e-05, |
|
"loss": 0.0283, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 4.6779629546367355e-05, |
|
"loss": 0.0631, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 4.676939672280254e-05, |
|
"loss": 0.0411, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 4.6759148790330673e-05, |
|
"loss": 0.0314, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 4.6748885756064266e-05, |
|
"loss": 0.0305, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 4.673860762712632e-05, |
|
"loss": 0.0329, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 4.672831441065028e-05, |
|
"loss": 0.0589, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 4.671800611378011e-05, |
|
"loss": 0.0652, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 4.67076827436702e-05, |
|
"loss": 0.0484, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 4.6697344307485426e-05, |
|
"loss": 0.0309, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 4.668699081240111e-05, |
|
"loss": 0.0322, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 4.667662226560302e-05, |
|
"loss": 0.0318, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 4.666623867428739e-05, |
|
"loss": 0.0362, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 4.665584004566087e-05, |
|
"loss": 0.0573, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 4.664542638694057e-05, |
|
"loss": 0.0446, |
|
"step": 668 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 4.6634997705354024e-05, |
|
"loss": 0.0291, |
|
"step": 669 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 4.6624554008139175e-05, |
|
"loss": 0.0498, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 4.6614095302544425e-05, |
|
"loss": 0.0269, |
|
"step": 671 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 4.660362159582855e-05, |
|
"loss": 0.044, |
|
"step": 672 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 4.659313289526076e-05, |
|
"loss": 0.0401, |
|
"step": 673 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 4.658262920812069e-05, |
|
"loss": 0.0586, |
|
"step": 674 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 4.657211054169832e-05, |
|
"loss": 0.0351, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 4.656157690329409e-05, |
|
"loss": 0.0464, |
|
"step": 676 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 4.655102830021879e-05, |
|
"loss": 0.0396, |
|
"step": 677 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 4.6540464739793624e-05, |
|
"loss": 0.0423, |
|
"step": 678 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 4.6529886229350134e-05, |
|
"loss": 0.0472, |
|
"step": 679 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 4.651929277623029e-05, |
|
"loss": 0.0337, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 4.6508684387786395e-05, |
|
"loss": 0.043, |
|
"step": 681 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 4.649806107138114e-05, |
|
"loss": 0.0477, |
|
"step": 682 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 4.6487422834387564e-05, |
|
"loss": 0.0453, |
|
"step": 683 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 4.6476769684189065e-05, |
|
"loss": 0.056, |
|
"step": 684 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 4.6466101628179395e-05, |
|
"loss": 0.0358, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 4.6455418673762646e-05, |
|
"loss": 0.0394, |
|
"step": 686 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 4.6444720828353256e-05, |
|
"loss": 0.0468, |
|
"step": 687 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 4.6434008099375984e-05, |
|
"loss": 0.0348, |
|
"step": 688 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 4.642328049426595e-05, |
|
"loss": 0.0374, |
|
"step": 689 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 4.641253802046855e-05, |
|
"loss": 0.0327, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 4.640178068543956e-05, |
|
"loss": 0.0412, |
|
"step": 691 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 4.639100849664501e-05, |
|
"loss": 0.0426, |
|
"step": 692 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 4.638022146156127e-05, |
|
"loss": 0.0355, |
|
"step": 693 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 4.6369419587675025e-05, |
|
"loss": 0.0354, |
|
"step": 694 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 4.635860288248324e-05, |
|
"loss": 0.0292, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 4.634777135349317e-05, |
|
"loss": 0.0304, |
|
"step": 696 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 4.633692500822239e-05, |
|
"loss": 0.0272, |
|
"step": 697 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 4.63260638541987e-05, |
|
"loss": 0.0364, |
|
"step": 698 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 4.631518789896023e-05, |
|
"loss": 0.0374, |
|
"step": 699 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 4.630429715005538e-05, |
|
"loss": 0.0424, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"eval_loss": 0.05031120032072067, |
|
"eval_runtime": 27.0122, |
|
"eval_samples_per_second": 22.212, |
|
"eval_steps_per_second": 2.777, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 4.629339161504278e-05, |
|
"loss": 0.0294, |
|
"step": 701 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 4.628247130149135e-05, |
|
"loss": 0.0411, |
|
"step": 702 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 4.6271536216980274e-05, |
|
"loss": 0.0336, |
|
"step": 703 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 4.626058636909897e-05, |
|
"loss": 0.0262, |
|
"step": 704 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 4.6249621765447096e-05, |
|
"loss": 0.0554, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 4.623864241363458e-05, |
|
"loss": 0.041, |
|
"step": 706 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 4.622764832128156e-05, |
|
"loss": 0.0497, |
|
"step": 707 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 4.621663949601841e-05, |
|
"loss": 0.0263, |
|
"step": 708 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 4.620561594548574e-05, |
|
"loss": 0.0435, |
|
"step": 709 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 4.6194577677334386e-05, |
|
"loss": 0.0238, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 4.6183524699225354e-05, |
|
"loss": 0.0339, |
|
"step": 711 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 4.6172457018829916e-05, |
|
"loss": 0.0366, |
|
"step": 712 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 4.616137464382952e-05, |
|
"loss": 0.0474, |
|
"step": 713 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 4.6150277581915804e-05, |
|
"loss": 0.0582, |
|
"step": 714 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 4.613916584079062e-05, |
|
"loss": 0.057, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 4.612803942816599e-05, |
|
"loss": 0.0479, |
|
"step": 716 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 4.611689835176414e-05, |
|
"loss": 0.0401, |
|
"step": 717 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 4.610574261931746e-05, |
|
"loss": 0.0415, |
|
"step": 718 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 4.609457223856851e-05, |
|
"loss": 0.0384, |
|
"step": 719 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 4.6083387217270013e-05, |
|
"loss": 0.0257, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 4.607218756318487e-05, |
|
"loss": 0.0329, |
|
"step": 721 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 4.606097328408612e-05, |
|
"loss": 0.0312, |
|
"step": 722 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 4.6049744387756966e-05, |
|
"loss": 0.045, |
|
"step": 723 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 4.6038500881990746e-05, |
|
"loss": 0.0504, |
|
"step": 724 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 4.6027242774590936e-05, |
|
"loss": 0.0503, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 4.601597007337116e-05, |
|
"loss": 0.0346, |
|
"step": 726 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 4.600468278615516e-05, |
|
"loss": 0.0462, |
|
"step": 727 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 4.59933809207768e-05, |
|
"loss": 0.0375, |
|
"step": 728 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 4.598206448508007e-05, |
|
"loss": 0.037, |
|
"step": 729 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 4.597073348691907e-05, |
|
"loss": 0.0411, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 4.595938793415801e-05, |
|
"loss": 0.0266, |
|
"step": 731 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 4.594802783467117e-05, |
|
"loss": 0.0472, |
|
"step": 732 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 4.5936653196342984e-05, |
|
"loss": 0.0274, |
|
"step": 733 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 4.592526402706793e-05, |
|
"loss": 0.0342, |
|
"step": 734 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 4.5913860334750583e-05, |
|
"loss": 0.0215, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 4.5902442127305614e-05, |
|
"loss": 0.0384, |
|
"step": 736 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 4.589100941265775e-05, |
|
"loss": 0.037, |
|
"step": 737 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 4.5879562198741776e-05, |
|
"loss": 0.0397, |
|
"step": 738 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 4.586810049350257e-05, |
|
"loss": 0.03, |
|
"step": 739 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 4.585662430489507e-05, |
|
"loss": 0.0348, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 4.584513364088422e-05, |
|
"loss": 0.0272, |
|
"step": 741 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 4.583362850944505e-05, |
|
"loss": 0.0469, |
|
"step": 742 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 4.582210891856262e-05, |
|
"loss": 0.0407, |
|
"step": 743 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 4.581057487623204e-05, |
|
"loss": 0.0421, |
|
"step": 744 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 4.579902639045841e-05, |
|
"loss": 0.035, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 4.578746346925691e-05, |
|
"loss": 0.0239, |
|
"step": 746 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 4.5775886120652686e-05, |
|
"loss": 0.0403, |
|
"step": 747 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 4.576429435268094e-05, |
|
"loss": 0.0221, |
|
"step": 748 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 4.5752688173386845e-05, |
|
"loss": 0.0221, |
|
"step": 749 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 4.5741067590825604e-05, |
|
"loss": 0.039, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"eval_loss": 0.03871891275048256, |
|
"eval_runtime": 27.0151, |
|
"eval_samples_per_second": 22.21, |
|
"eval_steps_per_second": 2.776, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 4.572943261306241e-05, |
|
"loss": 0.0313, |
|
"step": 751 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 4.571778324817243e-05, |
|
"loss": 0.0459, |
|
"step": 752 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 4.570611950424084e-05, |
|
"loss": 0.0318, |
|
"step": 753 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 4.5694441389362783e-05, |
|
"loss": 0.0338, |
|
"step": 754 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 4.5682748911643374e-05, |
|
"loss": 0.0316, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 4.56710420791977e-05, |
|
"loss": 0.0202, |
|
"step": 756 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 4.5659320900150824e-05, |
|
"loss": 0.0422, |
|
"step": 757 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 4.5647585382637734e-05, |
|
"loss": 0.0239, |
|
"step": 758 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 4.5635835534803406e-05, |
|
"loss": 0.0318, |
|
"step": 759 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 4.5624071364802735e-05, |
|
"loss": 0.0212, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 4.561229288080057e-05, |
|
"loss": 0.0394, |
|
"step": 761 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 4.560050009097169e-05, |
|
"loss": 0.0277, |
|
"step": 762 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 4.55886930035008e-05, |
|
"loss": 0.0302, |
|
"step": 763 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 4.5576871626582534e-05, |
|
"loss": 0.0241, |
|
"step": 764 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 4.5565035968421446e-05, |
|
"loss": 0.0222, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 4.555318603723199e-05, |
|
"loss": 0.0185, |
|
"step": 766 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 4.5541321841238525e-05, |
|
"loss": 0.0164, |
|
"step": 767 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 4.552944338867534e-05, |
|
"loss": 0.0225, |
|
"step": 768 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 4.551755068778659e-05, |
|
"loss": 0.0209, |
|
"step": 769 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 4.550564374682632e-05, |
|
"loss": 0.0218, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 4.5493722574058455e-05, |
|
"loss": 0.0179, |
|
"step": 771 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 4.548178717775683e-05, |
|
"loss": 0.0208, |
|
"step": 772 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 4.546983756620511e-05, |
|
"loss": 0.0224, |
|
"step": 773 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 4.545787374769686e-05, |
|
"loss": 0.0239, |
|
"step": 774 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 4.544589573053547e-05, |
|
"loss": 0.0258, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 4.5433903523034226e-05, |
|
"loss": 0.014, |
|
"step": 776 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 4.542189713351622e-05, |
|
"loss": 0.0165, |
|
"step": 777 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 4.540987657031443e-05, |
|
"loss": 0.0215, |
|
"step": 778 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 4.5397841841771626e-05, |
|
"loss": 0.0104, |
|
"step": 779 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 4.5385792956240444e-05, |
|
"loss": 0.019, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 4.537372992208334e-05, |
|
"loss": 0.0192, |
|
"step": 781 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 4.536165274767258e-05, |
|
"loss": 0.0141, |
|
"step": 782 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 4.534956144139024e-05, |
|
"loss": 0.0174, |
|
"step": 783 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 4.5337456011628214e-05, |
|
"loss": 0.012, |
|
"step": 784 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 4.5325336466788205e-05, |
|
"loss": 0.0195, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 4.53132028152817e-05, |
|
"loss": 0.0144, |
|
"step": 786 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 4.5301055065529975e-05, |
|
"loss": 0.0134, |
|
"step": 787 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 4.528889322596409e-05, |
|
"loss": 0.0113, |
|
"step": 788 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 4.527671730502491e-05, |
|
"loss": 0.0152, |
|
"step": 789 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 4.526452731116304e-05, |
|
"loss": 0.013, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 4.525232325283887e-05, |
|
"loss": 0.0231, |
|
"step": 791 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 4.5240105138522536e-05, |
|
"loss": 0.0276, |
|
"step": 792 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 4.522787297669396e-05, |
|
"loss": 0.014, |
|
"step": 793 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 4.5215626775842775e-05, |
|
"loss": 0.0186, |
|
"step": 794 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 4.520336654446838e-05, |
|
"loss": 0.0319, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 4.519109229107992e-05, |
|
"loss": 0.0174, |
|
"step": 796 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 4.517880402419624e-05, |
|
"loss": 0.0115, |
|
"step": 797 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 4.516650175234596e-05, |
|
"loss": 0.0304, |
|
"step": 798 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 4.515418548406736e-05, |
|
"loss": 0.0321, |
|
"step": 799 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 4.514185522790848e-05, |
|
"loss": 0.0173, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"eval_loss": 0.034719910472631454, |
|
"eval_runtime": 26.9947, |
|
"eval_samples_per_second": 22.227, |
|
"eval_steps_per_second": 2.778, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 4.512951099242706e-05, |
|
"loss": 0.019, |
|
"step": 801 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 4.511715278619052e-05, |
|
"loss": 0.0207, |
|
"step": 802 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 4.5104780617776013e-05, |
|
"loss": 0.0127, |
|
"step": 803 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 4.5092394495770335e-05, |
|
"loss": 0.012, |
|
"step": 804 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 4.5079994428770015e-05, |
|
"loss": 0.0156, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 4.506758042538122e-05, |
|
"loss": 0.0149, |
|
"step": 806 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 4.505515249421982e-05, |
|
"loss": 0.0156, |
|
"step": 807 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 4.5042710643911345e-05, |
|
"loss": 0.0141, |
|
"step": 808 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 4.503025488309096e-05, |
|
"loss": 0.0163, |
|
"step": 809 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 4.501778522040353e-05, |
|
"loss": 0.0209, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 4.500530166450351e-05, |
|
"loss": 0.0238, |
|
"step": 811 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 4.499280422405506e-05, |
|
"loss": 0.0117, |
|
"step": 812 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 4.498029290773193e-05, |
|
"loss": 0.0168, |
|
"step": 813 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 4.496776772421752e-05, |
|
"loss": 0.0158, |
|
"step": 814 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 4.495522868220485e-05, |
|
"loss": 0.0267, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 4.4942675790396575e-05, |
|
"loss": 0.0128, |
|
"step": 816 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 4.493010905750493e-05, |
|
"loss": 0.0265, |
|
"step": 817 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 4.491752849225177e-05, |
|
"loss": 0.0187, |
|
"step": 818 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 4.490493410336857e-05, |
|
"loss": 0.0169, |
|
"step": 819 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 4.4892325899596375e-05, |
|
"loss": 0.0086, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 4.487970388968583e-05, |
|
"loss": 0.0158, |
|
"step": 821 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 4.4867068082397144e-05, |
|
"loss": 0.0183, |
|
"step": 822 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 4.485441848650014e-05, |
|
"loss": 0.0148, |
|
"step": 823 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 4.484175511077416e-05, |
|
"loss": 0.0173, |
|
"step": 824 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 4.482907796400817e-05, |
|
"loss": 0.0097, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 4.481638705500063e-05, |
|
"loss": 0.0172, |
|
"step": 826 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 4.480368239255959e-05, |
|
"loss": 0.0195, |
|
"step": 827 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 4.479096398550265e-05, |
|
"loss": 0.0166, |
|
"step": 828 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 4.477823184265693e-05, |
|
"loss": 0.0149, |
|
"step": 829 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 4.476548597285908e-05, |
|
"loss": 0.0262, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 4.47527263849553e-05, |
|
"loss": 0.0144, |
|
"step": 831 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 4.473995308780129e-05, |
|
"loss": 0.0113, |
|
"step": 832 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 4.4727166090262285e-05, |
|
"loss": 0.0092, |
|
"step": 833 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 4.4714365401213e-05, |
|
"loss": 0.0213, |
|
"step": 834 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 4.470155102953769e-05, |
|
"loss": 0.0186, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 4.468872298413007e-05, |
|
"loss": 0.0144, |
|
"step": 836 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 4.467588127389336e-05, |
|
"loss": 0.012, |
|
"step": 837 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 4.466302590774028e-05, |
|
"loss": 0.0175, |
|
"step": 838 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 4.4650156894592995e-05, |
|
"loss": 0.0151, |
|
"step": 839 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 4.463727424338316e-05, |
|
"loss": 0.0135, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 4.4624377963051914e-05, |
|
"loss": 0.0151, |
|
"step": 841 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 4.461146806254982e-05, |
|
"loss": 0.0149, |
|
"step": 842 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 4.459854455083691e-05, |
|
"loss": 0.0207, |
|
"step": 843 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 4.458560743688266e-05, |
|
"loss": 0.0243, |
|
"step": 844 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 4.457265672966601e-05, |
|
"loss": 0.0147, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 4.4559692438175284e-05, |
|
"loss": 0.0171, |
|
"step": 846 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 4.454671457140829e-05, |
|
"loss": 0.0114, |
|
"step": 847 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 4.453372313837221e-05, |
|
"loss": 0.0217, |
|
"step": 848 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 4.4520718148083665e-05, |
|
"loss": 0.0181, |
|
"step": 849 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 4.4507699609568695e-05, |
|
"loss": 0.0188, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"eval_loss": 0.031473103910684586, |
|
"eval_runtime": 27.0103, |
|
"eval_samples_per_second": 22.214, |
|
"eval_steps_per_second": 2.777, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 4.449466753186273e-05, |
|
"loss": 0.0153, |
|
"step": 851 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 4.44816219240106e-05, |
|
"loss": 0.0134, |
|
"step": 852 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 4.446856279506651e-05, |
|
"loss": 0.0118, |
|
"step": 853 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 4.445549015409407e-05, |
|
"loss": 0.021, |
|
"step": 854 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 4.444240401016626e-05, |
|
"loss": 0.0257, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 4.4429304372365444e-05, |
|
"loss": 0.0094, |
|
"step": 856 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 4.441619124978332e-05, |
|
"loss": 0.0131, |
|
"step": 857 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 4.4403064651520975e-05, |
|
"loss": 0.0257, |
|
"step": 858 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 4.438992458668884e-05, |
|
"loss": 0.0122, |
|
"step": 859 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 4.4376771064406684e-05, |
|
"loss": 0.0172, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 4.4363604093803635e-05, |
|
"loss": 0.0179, |
|
"step": 861 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 4.435042368401813e-05, |
|
"loss": 0.0183, |
|
"step": 862 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 4.4337229844197954e-05, |
|
"loss": 0.0148, |
|
"step": 863 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 4.43240225835002e-05, |
|
"loss": 0.0227, |
|
"step": 864 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 4.431080191109128e-05, |
|
"loss": 0.0167, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 4.4297567836146924e-05, |
|
"loss": 0.0163, |
|
"step": 866 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 4.4284320367852145e-05, |
|
"loss": 0.0131, |
|
"step": 867 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 4.427105951540127e-05, |
|
"loss": 0.0231, |
|
"step": 868 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 4.425778528799789e-05, |
|
"loss": 0.0166, |
|
"step": 869 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 4.424449769485492e-05, |
|
"loss": 0.0179, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 4.423119674519451e-05, |
|
"loss": 0.0134, |
|
"step": 871 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 4.421788244824811e-05, |
|
"loss": 0.0276, |
|
"step": 872 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 4.420455481325639e-05, |
|
"loss": 0.0189, |
|
"step": 873 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 4.4191213849469346e-05, |
|
"loss": 0.0301, |
|
"step": 874 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 4.417785956614616e-05, |
|
"loss": 0.0123, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 4.41644919725553e-05, |
|
"loss": 0.0294, |
|
"step": 876 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 4.415111107797445e-05, |
|
"loss": 0.0162, |
|
"step": 877 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 4.4137716891690536e-05, |
|
"loss": 0.0171, |
|
"step": 878 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 4.41243094229997e-05, |
|
"loss": 0.0133, |
|
"step": 879 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 4.4110888681207314e-05, |
|
"loss": 0.0127, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 4.4097454675627946e-05, |
|
"loss": 0.0163, |
|
"step": 881 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 4.408400741558539e-05, |
|
"loss": 0.0149, |
|
"step": 882 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 4.4070546910412625e-05, |
|
"loss": 0.011, |
|
"step": 883 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 4.405707316945182e-05, |
|
"loss": 0.0174, |
|
"step": 884 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 4.404358620205435e-05, |
|
"loss": 0.016, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 4.4030086017580744e-05, |
|
"loss": 0.0152, |
|
"step": 886 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 4.4016572625400723e-05, |
|
"loss": 0.0121, |
|
"step": 887 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 4.400304603489316e-05, |
|
"loss": 0.0121, |
|
"step": 888 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 4.398950625544611e-05, |
|
"loss": 0.0178, |
|
"step": 889 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 4.397595329645675e-05, |
|
"loss": 0.0136, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 4.396238716733144e-05, |
|
"loss": 0.0263, |
|
"step": 891 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 4.3948807877485666e-05, |
|
"loss": 0.016, |
|
"step": 892 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 4.393521543634402e-05, |
|
"loss": 0.0166, |
|
"step": 893 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 4.392160985334027e-05, |
|
"loss": 0.0089, |
|
"step": 894 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 4.390799113791727e-05, |
|
"loss": 0.0148, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 4.3894359299527e-05, |
|
"loss": 0.0157, |
|
"step": 896 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 4.388071434763056e-05, |
|
"loss": 0.0112, |
|
"step": 897 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 4.386705629169813e-05, |
|
"loss": 0.0066, |
|
"step": 898 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 4.385338514120899e-05, |
|
"loss": 0.0157, |
|
"step": 899 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 4.3839700905651517e-05, |
|
"loss": 0.0134, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"eval_loss": 0.03026873990893364, |
|
"eval_runtime": 26.9629, |
|
"eval_samples_per_second": 22.253, |
|
"eval_steps_per_second": 2.782, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 4.382600359452317e-05, |
|
"loss": 0.0202, |
|
"step": 901 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 4.381229321733047e-05, |
|
"loss": 0.0127, |
|
"step": 902 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 4.3798569783589014e-05, |
|
"loss": 0.014, |
|
"step": 903 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 4.378483330282348e-05, |
|
"loss": 0.0104, |
|
"step": 904 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 4.377108378456756e-05, |
|
"loss": 0.0083, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 4.3757321238364015e-05, |
|
"loss": 0.0112, |
|
"step": 906 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 4.374354567376467e-05, |
|
"loss": 0.0159, |
|
"step": 907 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 4.3729757100330346e-05, |
|
"loss": 0.0157, |
|
"step": 908 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 4.371595552763093e-05, |
|
"loss": 0.0199, |
|
"step": 909 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 4.37021409652453e-05, |
|
"loss": 0.0211, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 4.368831342276136e-05, |
|
"loss": 0.0173, |
|
"step": 911 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 4.3674472909776046e-05, |
|
"loss": 0.0061, |
|
"step": 912 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 4.366061943589524e-05, |
|
"loss": 0.0149, |
|
"step": 913 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 4.3646753010733895e-05, |
|
"loss": 0.0095, |
|
"step": 914 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 4.363287364391588e-05, |
|
"loss": 0.0201, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 4.36189813450741e-05, |
|
"loss": 0.0138, |
|
"step": 916 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 4.36050761238504e-05, |
|
"loss": 0.0154, |
|
"step": 917 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 4.359115798989562e-05, |
|
"loss": 0.0134, |
|
"step": 918 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 4.357722695286953e-05, |
|
"loss": 0.014, |
|
"step": 919 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 4.3563283022440895e-05, |
|
"loss": 0.0148, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 4.35493262082874e-05, |
|
"loss": 0.0114, |
|
"step": 921 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 4.353535652009568e-05, |
|
"loss": 0.0166, |
|
"step": 922 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 4.352137396756131e-05, |
|
"loss": 0.0112, |
|
"step": 923 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 4.350737856038878e-05, |
|
"loss": 0.0172, |
|
"step": 924 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 4.3493370308291516e-05, |
|
"loss": 0.0219, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 4.347934922099186e-05, |
|
"loss": 0.0163, |
|
"step": 926 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 4.3465315308221046e-05, |
|
"loss": 0.0195, |
|
"step": 927 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 4.3451268579719216e-05, |
|
"loss": 0.0179, |
|
"step": 928 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 4.3437209045235425e-05, |
|
"loss": 0.0134, |
|
"step": 929 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 4.342313671452759e-05, |
|
"loss": 0.0175, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 4.340905159736253e-05, |
|
"loss": 0.0106, |
|
"step": 931 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 4.3394953703515905e-05, |
|
"loss": 0.017, |
|
"step": 932 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 4.338084304277229e-05, |
|
"loss": 0.0129, |
|
"step": 933 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 4.3366719624925084e-05, |
|
"loss": 0.0128, |
|
"step": 934 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 4.3352583459776554e-05, |
|
"loss": 0.0102, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 4.3338434557137816e-05, |
|
"loss": 0.0129, |
|
"step": 936 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 4.332427292682882e-05, |
|
"loss": 0.015, |
|
"step": 937 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 4.331009857867836e-05, |
|
"loss": 0.015, |
|
"step": 938 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 4.3295911522524044e-05, |
|
"loss": 0.0077, |
|
"step": 939 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 4.32817117682123e-05, |
|
"loss": 0.0225, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 4.326749932559838e-05, |
|
"loss": 0.0125, |
|
"step": 941 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 4.3253274204546344e-05, |
|
"loss": 0.0098, |
|
"step": 942 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 4.323903641492903e-05, |
|
"loss": 0.0063, |
|
"step": 943 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 4.322478596662809e-05, |
|
"loss": 0.0083, |
|
"step": 944 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 4.3210522869533946e-05, |
|
"loss": 0.0131, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 4.319624713354582e-05, |
|
"loss": 0.0079, |
|
"step": 946 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 4.318195876857169e-05, |
|
"loss": 0.0143, |
|
"step": 947 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 4.316765778452829e-05, |
|
"loss": 0.023, |
|
"step": 948 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 4.315334419134114e-05, |
|
"loss": 0.0126, |
|
"step": 949 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 4.3139017998944486e-05, |
|
"loss": 0.0113, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"eval_loss": 0.027030887082219124, |
|
"eval_runtime": 26.8967, |
|
"eval_samples_per_second": 22.308, |
|
"eval_steps_per_second": 2.788, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 4.312467921728133e-05, |
|
"loss": 0.0222, |
|
"step": 951 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 4.311032785630341e-05, |
|
"loss": 0.0131, |
|
"step": 952 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 4.3095963925971194e-05, |
|
"loss": 0.024, |
|
"step": 953 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 4.308158743625388e-05, |
|
"loss": 0.0095, |
|
"step": 954 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 4.306719839712936e-05, |
|
"loss": 0.0142, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 4.305279681858426e-05, |
|
"loss": 0.0151, |
|
"step": 956 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 4.303838271061391e-05, |
|
"loss": 0.0157, |
|
"step": 957 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 4.302395608322233e-05, |
|
"loss": 0.016, |
|
"step": 958 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 4.30095169464222e-05, |
|
"loss": 0.0152, |
|
"step": 959 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 4.2995065310234926e-05, |
|
"loss": 0.0108, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 4.2980601184690576e-05, |
|
"loss": 0.0138, |
|
"step": 961 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 4.2966124579827874e-05, |
|
"loss": 0.0113, |
|
"step": 962 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 4.2951635505694214e-05, |
|
"loss": 0.014, |
|
"step": 963 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 4.293713397234565e-05, |
|
"loss": 0.0071, |
|
"step": 964 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 4.2922619989846856e-05, |
|
"loss": 0.0091, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 4.290809356827119e-05, |
|
"loss": 0.0108, |
|
"step": 966 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 4.2893554717700604e-05, |
|
"loss": 0.0141, |
|
"step": 967 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 4.2879003448225694e-05, |
|
"loss": 0.0119, |
|
"step": 968 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 4.286443976994569e-05, |
|
"loss": 0.0119, |
|
"step": 969 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 4.284986369296839e-05, |
|
"loss": 0.0157, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 4.2835275227410245e-05, |
|
"loss": 0.0129, |
|
"step": 971 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 4.2820674383396274e-05, |
|
"loss": 0.015, |
|
"step": 972 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 4.28060611710601e-05, |
|
"loss": 0.015, |
|
"step": 973 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 4.2791435600543934e-05, |
|
"loss": 0.0105, |
|
"step": 974 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 4.277679768199855e-05, |
|
"loss": 0.0131, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 4.2762147425583285e-05, |
|
"loss": 0.0126, |
|
"step": 976 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 4.274748484146609e-05, |
|
"loss": 0.0281, |
|
"step": 977 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 4.2732809939823404e-05, |
|
"loss": 0.0082, |
|
"step": 978 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 4.2718122730840254e-05, |
|
"loss": 0.0066, |
|
"step": 979 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 4.27034232247102e-05, |
|
"loss": 0.018, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 4.268871143163535e-05, |
|
"loss": 0.0127, |
|
"step": 981 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 4.2673987361826315e-05, |
|
"loss": 0.0251, |
|
"step": 982 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 4.2659251025502244e-05, |
|
"loss": 0.012, |
|
"step": 983 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 4.264450243289079e-05, |
|
"loss": 0.0223, |
|
"step": 984 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 4.2629741594228126e-05, |
|
"loss": 0.0182, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 4.261496851975891e-05, |
|
"loss": 0.0073, |
|
"step": 986 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 4.260018321973629e-05, |
|
"loss": 0.0084, |
|
"step": 987 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 4.258538570442192e-05, |
|
"loss": 0.0124, |
|
"step": 988 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 4.257057598408591e-05, |
|
"loss": 0.0127, |
|
"step": 989 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 4.255575406900685e-05, |
|
"loss": 0.0135, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 4.2540919969471793e-05, |
|
"loss": 0.0098, |
|
"step": 991 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 4.252607369577625e-05, |
|
"loss": 0.0164, |
|
"step": 992 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 4.251121525822419e-05, |
|
"loss": 0.0117, |
|
"step": 993 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 4.249634466712799e-05, |
|
"loss": 0.0238, |
|
"step": 994 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 4.248146193280851e-05, |
|
"loss": 0.0134, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 4.2466567065595e-05, |
|
"loss": 0.0207, |
|
"step": 996 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 4.245166007582515e-05, |
|
"loss": 0.0118, |
|
"step": 997 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 4.243674097384506e-05, |
|
"loss": 0.018, |
|
"step": 998 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 4.2421809770009225e-05, |
|
"loss": 0.014, |
|
"step": 999 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 4.240686647468056e-05, |
|
"loss": 0.0146, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"eval_loss": 0.024349618703126907, |
|
"eval_runtime": 26.998, |
|
"eval_samples_per_second": 22.224, |
|
"eval_steps_per_second": 2.778, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 4.239191109823036e-05, |
|
"loss": 0.0126, |
|
"step": 1001 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 4.237694365103831e-05, |
|
"loss": 0.0142, |
|
"step": 1002 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 4.236196414349246e-05, |
|
"loss": 0.0133, |
|
"step": 1003 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 4.2346972585989234e-05, |
|
"loss": 0.0096, |
|
"step": 1004 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 4.2331968988933436e-05, |
|
"loss": 0.009, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 4.231695336273821e-05, |
|
"loss": 0.0099, |
|
"step": 1006 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 4.2301925717825055e-05, |
|
"loss": 0.0123, |
|
"step": 1007 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 4.22868860646238e-05, |
|
"loss": 0.0119, |
|
"step": 1008 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 4.227183441357263e-05, |
|
"loss": 0.0101, |
|
"step": 1009 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 4.225677077511804e-05, |
|
"loss": 0.0084, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 4.2241695159714844e-05, |
|
"loss": 0.0204, |
|
"step": 1011 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 4.2226607577826184e-05, |
|
"loss": 0.0072, |
|
"step": 1012 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 4.22115080399235e-05, |
|
"loss": 0.0132, |
|
"step": 1013 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 4.219639655648651e-05, |
|
"loss": 0.0255, |
|
"step": 1014 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 4.2181273138003244e-05, |
|
"loss": 0.0065, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 4.216613779497003e-05, |
|
"loss": 0.0104, |
|
"step": 1016 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 4.215099053789143e-05, |
|
"loss": 0.0096, |
|
"step": 1017 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 4.213583137728031e-05, |
|
"loss": 0.0147, |
|
"step": 1018 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 4.212066032365779e-05, |
|
"loss": 0.0175, |
|
"step": 1019 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 4.210547738755323e-05, |
|
"loss": 0.0175, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 4.209028257950426e-05, |
|
"loss": 0.0141, |
|
"step": 1021 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 4.207507591005673e-05, |
|
"loss": 0.0145, |
|
"step": 1022 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 4.205985738976472e-05, |
|
"loss": 0.0162, |
|
"step": 1023 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 4.204462702919055e-05, |
|
"loss": 0.011, |
|
"step": 1024 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 4.2029384838904776e-05, |
|
"loss": 0.0127, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 4.20141308294861e-05, |
|
"loss": 0.0124, |
|
"step": 1026 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 4.199886501152149e-05, |
|
"loss": 0.0166, |
|
"step": 1027 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.198358739560608e-05, |
|
"loss": 0.0114, |
|
"step": 1028 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.196829799234321e-05, |
|
"loss": 0.0079, |
|
"step": 1029 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.195299681234438e-05, |
|
"loss": 0.0167, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.1937683866229286e-05, |
|
"loss": 0.0065, |
|
"step": 1031 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 4.1922359164625744e-05, |
|
"loss": 0.028, |
|
"step": 1032 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 4.190702271816981e-05, |
|
"loss": 0.0122, |
|
"step": 1033 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 4.189167453750561e-05, |
|
"loss": 0.0054, |
|
"step": 1034 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 4.187631463328545e-05, |
|
"loss": 0.0129, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 4.186094301616979e-05, |
|
"loss": 0.0145, |
|
"step": 1036 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 4.184555969682719e-05, |
|
"loss": 0.0224, |
|
"step": 1037 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 4.183016468593434e-05, |
|
"loss": 0.0116, |
|
"step": 1038 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 4.181475799417605e-05, |
|
"loss": 0.0198, |
|
"step": 1039 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 4.1799339632245224e-05, |
|
"loss": 0.0127, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 4.178390961084289e-05, |
|
"loss": 0.0132, |
|
"step": 1041 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 4.1768467940678147e-05, |
|
"loss": 0.0133, |
|
"step": 1042 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 4.175301463246818e-05, |
|
"loss": 0.0108, |
|
"step": 1043 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 4.173754969693826e-05, |
|
"loss": 0.0064, |
|
"step": 1044 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 4.172207314482171e-05, |
|
"loss": 0.0127, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 4.170658498685996e-05, |
|
"loss": 0.0192, |
|
"step": 1046 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 4.169108523380245e-05, |
|
"loss": 0.0082, |
|
"step": 1047 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 4.167557389640667e-05, |
|
"loss": 0.0091, |
|
"step": 1048 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 4.166005098543818e-05, |
|
"loss": 0.011, |
|
"step": 1049 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 4.164451651167054e-05, |
|
"loss": 0.0148, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"eval_loss": 0.0241435207426548, |
|
"eval_runtime": 26.9627, |
|
"eval_samples_per_second": 22.253, |
|
"eval_steps_per_second": 2.782, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 4.1628970485885355e-05, |
|
"loss": 0.0103, |
|
"step": 1051 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 4.161341291887224e-05, |
|
"loss": 0.0058, |
|
"step": 1052 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 4.1597843821428836e-05, |
|
"loss": 0.0245, |
|
"step": 1053 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 4.1582263204360755e-05, |
|
"loss": 0.0246, |
|
"step": 1054 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 4.156667107848163e-05, |
|
"loss": 0.0139, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 4.155106745461308e-05, |
|
"loss": 0.0213, |
|
"step": 1056 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 4.153545234358469e-05, |
|
"loss": 0.0119, |
|
"step": 1057 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 4.151982575623402e-05, |
|
"loss": 0.0109, |
|
"step": 1058 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 4.1504187703406604e-05, |
|
"loss": 0.0233, |
|
"step": 1059 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 4.148853819595595e-05, |
|
"loss": 0.0194, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 4.1472877244743466e-05, |
|
"loss": 0.016, |
|
"step": 1061 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 4.1457204860638544e-05, |
|
"loss": 0.0075, |
|
"step": 1062 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 4.144152105451851e-05, |
|
"loss": 0.0102, |
|
"step": 1063 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 4.1425825837268596e-05, |
|
"loss": 0.0108, |
|
"step": 1064 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 4.141011921978196e-05, |
|
"loss": 0.012, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 4.139440121295969e-05, |
|
"loss": 0.0101, |
|
"step": 1066 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 4.1378671827710755e-05, |
|
"loss": 0.0079, |
|
"step": 1067 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 4.136293107495203e-05, |
|
"loss": 0.0137, |
|
"step": 1068 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 4.13471789656083e-05, |
|
"loss": 0.0099, |
|
"step": 1069 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 4.133141551061219e-05, |
|
"loss": 0.0108, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 4.1315640720904236e-05, |
|
"loss": 0.0172, |
|
"step": 1071 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 4.129985460743283e-05, |
|
"loss": 0.0105, |
|
"step": 1072 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 4.128405718115421e-05, |
|
"loss": 0.0051, |
|
"step": 1073 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 4.126824845303248e-05, |
|
"loss": 0.0076, |
|
"step": 1074 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 4.125242843403958e-05, |
|
"loss": 0.0079, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 4.12365971351553e-05, |
|
"loss": 0.0148, |
|
"step": 1076 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 4.122075456736725e-05, |
|
"loss": 0.0112, |
|
"step": 1077 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 4.120490074167085e-05, |
|
"loss": 0.0121, |
|
"step": 1078 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 4.1189035669069344e-05, |
|
"loss": 0.0096, |
|
"step": 1079 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 4.117315936057379e-05, |
|
"loss": 0.0129, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 4.1157271827203026e-05, |
|
"loss": 0.0131, |
|
"step": 1081 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 4.11413730799837e-05, |
|
"loss": 0.0049, |
|
"step": 1082 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 4.112546312995024e-05, |
|
"loss": 0.014, |
|
"step": 1083 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 4.110954198814482e-05, |
|
"loss": 0.0062, |
|
"step": 1084 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 4.1093609665617416e-05, |
|
"loss": 0.0109, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 4.1077666173425756e-05, |
|
"loss": 0.0118, |
|
"step": 1086 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 4.106171152263532e-05, |
|
"loss": 0.0121, |
|
"step": 1087 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 4.10457457243193e-05, |
|
"loss": 0.011, |
|
"step": 1088 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 4.102976878955869e-05, |
|
"loss": 0.0104, |
|
"step": 1089 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 4.101378072944216e-05, |
|
"loss": 0.0157, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 4.0997781555066115e-05, |
|
"loss": 0.0166, |
|
"step": 1091 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 4.0981771277534684e-05, |
|
"loss": 0.0164, |
|
"step": 1092 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 4.0965749907959696e-05, |
|
"loss": 0.0102, |
|
"step": 1093 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 4.0949717457460674e-05, |
|
"loss": 0.0117, |
|
"step": 1094 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 4.093367393716483e-05, |
|
"loss": 0.0114, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 4.091761935820708e-05, |
|
"loss": 0.0197, |
|
"step": 1096 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 4.090155373172998e-05, |
|
"loss": 0.0105, |
|
"step": 1097 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 4.088547706888378e-05, |
|
"loss": 0.0127, |
|
"step": 1098 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 4.0869389380826395e-05, |
|
"loss": 0.0083, |
|
"step": 1099 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 4.0853290678723364e-05, |
|
"loss": 0.017, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"eval_loss": 0.021086279302835464, |
|
"eval_runtime": 27.0777, |
|
"eval_samples_per_second": 22.158, |
|
"eval_steps_per_second": 2.77, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 4.083718097374788e-05, |
|
"loss": 0.0132, |
|
"step": 1101 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 4.08210602770808e-05, |
|
"loss": 0.0088, |
|
"step": 1102 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 4.080492859991057e-05, |
|
"loss": 0.0094, |
|
"step": 1103 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 4.0788785953433286e-05, |
|
"loss": 0.0091, |
|
"step": 1104 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 4.077263234885264e-05, |
|
"loss": 0.0126, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 4.075646779737993e-05, |
|
"loss": 0.0135, |
|
"step": 1106 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 4.074029231023406e-05, |
|
"loss": 0.0047, |
|
"step": 1107 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 4.072410589864154e-05, |
|
"loss": 0.0157, |
|
"step": 1108 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 4.0707908573836415e-05, |
|
"loss": 0.0123, |
|
"step": 1109 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 4.069170034706035e-05, |
|
"loss": 0.012, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 4.067548122956254e-05, |
|
"loss": 0.0088, |
|
"step": 1111 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 4.065925123259978e-05, |
|
"loss": 0.0115, |
|
"step": 1112 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 4.064301036743638e-05, |
|
"loss": 0.0116, |
|
"step": 1113 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 4.0626758645344205e-05, |
|
"loss": 0.0059, |
|
"step": 1114 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 4.061049607760266e-05, |
|
"loss": 0.0101, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 4.059422267549868e-05, |
|
"loss": 0.015, |
|
"step": 1116 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 4.05779384503267e-05, |
|
"loss": 0.0114, |
|
"step": 1117 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 4.056164341338868e-05, |
|
"loss": 0.0061, |
|
"step": 1118 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 4.05453375759941e-05, |
|
"loss": 0.0113, |
|
"step": 1119 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 4.0529020949459904e-05, |
|
"loss": 0.0203, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 4.051269354511055e-05, |
|
"loss": 0.0085, |
|
"step": 1121 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 4.049635537427795e-05, |
|
"loss": 0.0121, |
|
"step": 1122 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 4.048000644830152e-05, |
|
"loss": 0.0158, |
|
"step": 1123 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 4.046364677852812e-05, |
|
"loss": 0.0037, |
|
"step": 1124 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 4.044727637631207e-05, |
|
"loss": 0.0101, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 4.0430895253015144e-05, |
|
"loss": 0.0181, |
|
"step": 1126 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 4.041450342000655e-05, |
|
"loss": 0.0146, |
|
"step": 1127 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 4.039810088866293e-05, |
|
"loss": 0.0089, |
|
"step": 1128 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 4.038168767036836e-05, |
|
"loss": 0.0124, |
|
"step": 1129 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 4.036526377651432e-05, |
|
"loss": 0.0182, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 4.0348829218499706e-05, |
|
"loss": 0.0116, |
|
"step": 1131 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 4.0332384007730826e-05, |
|
"loss": 0.0213, |
|
"step": 1132 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 4.031592815562135e-05, |
|
"loss": 0.0076, |
|
"step": 1133 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 4.0299461673592376e-05, |
|
"loss": 0.0145, |
|
"step": 1134 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 4.028298457307235e-05, |
|
"loss": 0.012, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 4.026649686549708e-05, |
|
"loss": 0.0149, |
|
"step": 1136 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 4.0249998562309785e-05, |
|
"loss": 0.0097, |
|
"step": 1137 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 4.023348967496098e-05, |
|
"loss": 0.0171, |
|
"step": 1138 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 4.0216970214908545e-05, |
|
"loss": 0.0165, |
|
"step": 1139 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 4.020044019361772e-05, |
|
"loss": 0.0098, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 4.018389962256104e-05, |
|
"loss": 0.0068, |
|
"step": 1141 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4.01673485132184e-05, |
|
"loss": 0.0153, |
|
"step": 1142 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4.015078687707697e-05, |
|
"loss": 0.0062, |
|
"step": 1143 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4.013421472563127e-05, |
|
"loss": 0.0093, |
|
"step": 1144 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 4.011763207038307e-05, |
|
"loss": 0.0062, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 4.010103892284146e-05, |
|
"loss": 0.0096, |
|
"step": 1146 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 4.00844352945228e-05, |
|
"loss": 0.0094, |
|
"step": 1147 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 4.006782119695074e-05, |
|
"loss": 0.0073, |
|
"step": 1148 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 4.0051196641656185e-05, |
|
"loss": 0.0061, |
|
"step": 1149 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 4.003456164017729e-05, |
|
"loss": 0.0038, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"eval_loss": 0.020303795114159584, |
|
"eval_runtime": 26.893, |
|
"eval_samples_per_second": 22.311, |
|
"eval_steps_per_second": 2.789, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 4.001791620405947e-05, |
|
"loss": 0.0121, |
|
"step": 1151 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 4.000126034485539e-05, |
|
"loss": 0.0061, |
|
"step": 1152 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 3.9984594074124924e-05, |
|
"loss": 0.0033, |
|
"step": 1153 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 3.99679174034352e-05, |
|
"loss": 0.0103, |
|
"step": 1154 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 3.995123034436056e-05, |
|
"loss": 0.0043, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 3.993453290848252e-05, |
|
"loss": 0.0101, |
|
"step": 1156 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 3.9917825107389853e-05, |
|
"loss": 0.0055, |
|
"step": 1157 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 3.9901106952678494e-05, |
|
"loss": 0.0034, |
|
"step": 1158 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 3.9884378455951554e-05, |
|
"loss": 0.005, |
|
"step": 1159 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 3.986763962881935e-05, |
|
"loss": 0.0038, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 3.985089048289935e-05, |
|
"loss": 0.0044, |
|
"step": 1161 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 3.983413102981619e-05, |
|
"loss": 0.0143, |
|
"step": 1162 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 3.981736128120166e-05, |
|
"loss": 0.0064, |
|
"step": 1163 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 3.980058124869469e-05, |
|
"loss": 0.007, |
|
"step": 1164 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 3.978379094394135e-05, |
|
"loss": 0.0128, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 3.9766990378594854e-05, |
|
"loss": 0.0083, |
|
"step": 1166 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 3.9750179564315515e-05, |
|
"loss": 0.0108, |
|
"step": 1167 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 3.973335851277077e-05, |
|
"loss": 0.0064, |
|
"step": 1168 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 3.9716527235635146e-05, |
|
"loss": 0.008, |
|
"step": 1169 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 3.9699685744590304e-05, |
|
"loss": 0.0092, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 3.968283405132496e-05, |
|
"loss": 0.0075, |
|
"step": 1171 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 3.966597216753492e-05, |
|
"loss": 0.0108, |
|
"step": 1172 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 3.9649100104923055e-05, |
|
"loss": 0.0088, |
|
"step": 1173 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 3.963221787519933e-05, |
|
"loss": 0.0062, |
|
"step": 1174 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 3.961532549008072e-05, |
|
"loss": 0.0033, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 3.9598422961291306e-05, |
|
"loss": 0.0038, |
|
"step": 1176 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 3.9581510300562155e-05, |
|
"loss": 0.0055, |
|
"step": 1177 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 3.9564587519631384e-05, |
|
"loss": 0.0058, |
|
"step": 1178 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 3.9547654630244156e-05, |
|
"loss": 0.0075, |
|
"step": 1179 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 3.953071164415262e-05, |
|
"loss": 0.0041, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 3.951375857311595e-05, |
|
"loss": 0.0076, |
|
"step": 1181 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 3.949679542890031e-05, |
|
"loss": 0.0051, |
|
"step": 1182 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 3.947982222327886e-05, |
|
"loss": 0.0038, |
|
"step": 1183 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 3.946283896803175e-05, |
|
"loss": 0.0016, |
|
"step": 1184 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 3.944584567494608e-05, |
|
"loss": 0.0086, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 3.942884235581594e-05, |
|
"loss": 0.0022, |
|
"step": 1186 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 3.941182902244238e-05, |
|
"loss": 0.0064, |
|
"step": 1187 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 3.93948056866334e-05, |
|
"loss": 0.0048, |
|
"step": 1188 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 3.937777236020391e-05, |
|
"loss": 0.005, |
|
"step": 1189 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 3.936072905497579e-05, |
|
"loss": 0.0049, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 3.934367578277783e-05, |
|
"loss": 0.0055, |
|
"step": 1191 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 3.932661255544576e-05, |
|
"loss": 0.0057, |
|
"step": 1192 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 3.930953938482218e-05, |
|
"loss": 0.0029, |
|
"step": 1193 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 3.929245628275662e-05, |
|
"loss": 0.0063, |
|
"step": 1194 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 3.9275363261105494e-05, |
|
"loss": 0.0081, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 3.925826033173211e-05, |
|
"loss": 0.0046, |
|
"step": 1196 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 3.924114750650663e-05, |
|
"loss": 0.0108, |
|
"step": 1197 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 3.922402479730611e-05, |
|
"loss": 0.0114, |
|
"step": 1198 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 3.9206892216014446e-05, |
|
"loss": 0.0062, |
|
"step": 1199 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 3.9189749774522396e-05, |
|
"loss": 0.0045, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"eval_loss": 0.02036808617413044, |
|
"eval_runtime": 26.9559, |
|
"eval_samples_per_second": 22.259, |
|
"eval_steps_per_second": 2.782, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 3.917259748472756e-05, |
|
"loss": 0.0016, |
|
"step": 1201 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 3.915543535853437e-05, |
|
"loss": 0.0026, |
|
"step": 1202 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 3.9138263407854085e-05, |
|
"loss": 0.0058, |
|
"step": 1203 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 3.9121081644604794e-05, |
|
"loss": 0.0043, |
|
"step": 1204 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 3.910389008071139e-05, |
|
"loss": 0.0079, |
|
"step": 1205 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 3.9086688728105544e-05, |
|
"loss": 0.0046, |
|
"step": 1206 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 3.9069477598725753e-05, |
|
"loss": 0.0088, |
|
"step": 1207 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 3.90522567045173e-05, |
|
"loss": 0.0048, |
|
"step": 1208 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 3.903502605743222e-05, |
|
"loss": 0.008, |
|
"step": 1209 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 3.901778566942934e-05, |
|
"loss": 0.0106, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 3.900053555247423e-05, |
|
"loss": 0.0076, |
|
"step": 1211 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 3.8983275718539216e-05, |
|
"loss": 0.0094, |
|
"step": 1212 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 3.896600617960339e-05, |
|
"loss": 0.0078, |
|
"step": 1213 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 3.894872694765255e-05, |
|
"loss": 0.005, |
|
"step": 1214 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 3.893143803467924e-05, |
|
"loss": 0.0057, |
|
"step": 1215 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 3.8914139452682705e-05, |
|
"loss": 0.0051, |
|
"step": 1216 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 3.889683121366893e-05, |
|
"loss": 0.0029, |
|
"step": 1217 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 3.887951332965056e-05, |
|
"loss": 0.0089, |
|
"step": 1218 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 3.886218581264699e-05, |
|
"loss": 0.0051, |
|
"step": 1219 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 3.884484867468424e-05, |
|
"loss": 0.0077, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 3.8827501927795055e-05, |
|
"loss": 0.005, |
|
"step": 1221 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 3.881014558401883e-05, |
|
"loss": 0.0051, |
|
"step": 1222 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 3.879277965540161e-05, |
|
"loss": 0.0084, |
|
"step": 1223 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 3.877540415399612e-05, |
|
"loss": 0.0108, |
|
"step": 1224 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 3.875801909186171e-05, |
|
"loss": 0.0037, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 3.8740624481064345e-05, |
|
"loss": 0.0083, |
|
"step": 1226 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 3.8723220333676666e-05, |
|
"loss": 0.0099, |
|
"step": 1227 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 3.870580666177791e-05, |
|
"loss": 0.0074, |
|
"step": 1228 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 3.86883834774539e-05, |
|
"loss": 0.0073, |
|
"step": 1229 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 3.86709507927971e-05, |
|
"loss": 0.0118, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 3.8653508619906545e-05, |
|
"loss": 0.0106, |
|
"step": 1231 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 3.863605697088786e-05, |
|
"loss": 0.0092, |
|
"step": 1232 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 3.8618595857853234e-05, |
|
"loss": 0.0071, |
|
"step": 1233 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 3.860112529292147e-05, |
|
"loss": 0.0091, |
|
"step": 1234 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 3.858364528821788e-05, |
|
"loss": 0.0088, |
|
"step": 1235 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 3.856615585587434e-05, |
|
"loss": 0.0081, |
|
"step": 1236 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 3.8548657008029284e-05, |
|
"loss": 0.0185, |
|
"step": 1237 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 3.8531148756827685e-05, |
|
"loss": 0.0085, |
|
"step": 1238 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 3.851363111442101e-05, |
|
"loss": 0.0054, |
|
"step": 1239 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 3.849610409296727e-05, |
|
"loss": 0.0087, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 3.847856770463097e-05, |
|
"loss": 0.0083, |
|
"step": 1241 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 3.846102196158315e-05, |
|
"loss": 0.0058, |
|
"step": 1242 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 3.84434668760013e-05, |
|
"loss": 0.0083, |
|
"step": 1243 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 3.842590246006942e-05, |
|
"loss": 0.0049, |
|
"step": 1244 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 3.8408328725977966e-05, |
|
"loss": 0.0054, |
|
"step": 1245 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 3.839074568592388e-05, |
|
"loss": 0.0024, |
|
"step": 1246 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 3.837315335211056e-05, |
|
"loss": 0.0104, |
|
"step": 1247 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 3.835555173674784e-05, |
|
"loss": 0.0031, |
|
"step": 1248 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 3.8337940852052024e-05, |
|
"loss": 0.0059, |
|
"step": 1249 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 3.832032071024581e-05, |
|
"loss": 0.0057, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"eval_loss": 0.02174745313823223, |
|
"eval_runtime": 26.9569, |
|
"eval_samples_per_second": 22.258, |
|
"eval_steps_per_second": 2.782, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 3.830269132355835e-05, |
|
"loss": 0.009, |
|
"step": 1251 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 3.828505270422521e-05, |
|
"loss": 0.006, |
|
"step": 1252 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 3.8267404864488355e-05, |
|
"loss": 0.0043, |
|
"step": 1253 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 3.8249747816596136e-05, |
|
"loss": 0.0034, |
|
"step": 1254 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 3.823208157280334e-05, |
|
"loss": 0.0079, |
|
"step": 1255 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 3.821440614537108e-05, |
|
"loss": 0.0039, |
|
"step": 1256 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 3.81967215465669e-05, |
|
"loss": 0.005, |
|
"step": 1257 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 3.8179027788664654e-05, |
|
"loss": 0.0116, |
|
"step": 1258 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 3.816132488394459e-05, |
|
"loss": 0.0069, |
|
"step": 1259 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 3.814361284469329e-05, |
|
"loss": 0.004, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 3.8125891683203686e-05, |
|
"loss": 0.0058, |
|
"step": 1261 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 3.810816141177502e-05, |
|
"loss": 0.011, |
|
"step": 1262 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 3.809042204271288e-05, |
|
"loss": 0.0048, |
|
"step": 1263 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 3.807267358832916e-05, |
|
"loss": 0.0045, |
|
"step": 1264 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 3.805491606094206e-05, |
|
"loss": 0.0043, |
|
"step": 1265 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 3.803714947287607e-05, |
|
"loss": 0.0101, |
|
"step": 1266 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 3.8019373836461966e-05, |
|
"loss": 0.0096, |
|
"step": 1267 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 3.800158916403682e-05, |
|
"loss": 0.0059, |
|
"step": 1268 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 3.7983795467943975e-05, |
|
"loss": 0.0075, |
|
"step": 1269 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 3.796599276053303e-05, |
|
"loss": 0.0056, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 3.7948181054159814e-05, |
|
"loss": 0.0073, |
|
"step": 1271 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 3.793036036118645e-05, |
|
"loss": 0.0128, |
|
"step": 1272 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 3.7912530693981265e-05, |
|
"loss": 0.0101, |
|
"step": 1273 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 3.789469206491882e-05, |
|
"loss": 0.0066, |
|
"step": 1274 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 3.7876844486379895e-05, |
|
"loss": 0.0041, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 3.785898797075149e-05, |
|
"loss": 0.0103, |
|
"step": 1276 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 3.784112253042681e-05, |
|
"loss": 0.0134, |
|
"step": 1277 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 3.782324817780524e-05, |
|
"loss": 0.0045, |
|
"step": 1278 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 3.7805364925292344e-05, |
|
"loss": 0.0052, |
|
"step": 1279 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 3.77874727852999e-05, |
|
"loss": 0.0076, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 3.776957177024582e-05, |
|
"loss": 0.0114, |
|
"step": 1281 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 3.775166189255417e-05, |
|
"loss": 0.0045, |
|
"step": 1282 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 3.7733743164655214e-05, |
|
"loss": 0.0074, |
|
"step": 1283 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 3.77158155989853e-05, |
|
"loss": 0.0053, |
|
"step": 1284 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 3.769787920798696e-05, |
|
"loss": 0.0078, |
|
"step": 1285 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 3.7679934004108806e-05, |
|
"loss": 0.006, |
|
"step": 1286 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 3.766197999980562e-05, |
|
"loss": 0.0064, |
|
"step": 1287 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 3.7644017207538224e-05, |
|
"loss": 0.0053, |
|
"step": 1288 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 3.76260456397736e-05, |
|
"loss": 0.0089, |
|
"step": 1289 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 3.7608065308984786e-05, |
|
"loss": 0.0053, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 3.7590076227650925e-05, |
|
"loss": 0.008, |
|
"step": 1291 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 3.757207840825721e-05, |
|
"loss": 0.0054, |
|
"step": 1292 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 3.755407186329492e-05, |
|
"loss": 0.0055, |
|
"step": 1293 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 3.753605660526136e-05, |
|
"loss": 0.0042, |
|
"step": 1294 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 3.751803264665992e-05, |
|
"loss": 0.002, |
|
"step": 1295 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 3.7500000000000003e-05, |
|
"loss": 0.0075, |
|
"step": 1296 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 3.7481958677797055e-05, |
|
"loss": 0.005, |
|
"step": 1297 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 3.7463908692572526e-05, |
|
"loss": 0.009, |
|
"step": 1298 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 3.74458500568539e-05, |
|
"loss": 0.0062, |
|
"step": 1299 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 3.742778278317465e-05, |
|
"loss": 0.0061, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"eval_loss": 0.02023252286016941, |
|
"eval_runtime": 26.9716, |
|
"eval_samples_per_second": 22.246, |
|
"eval_steps_per_second": 2.781, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 3.740970688407425e-05, |
|
"loss": 0.0031, |
|
"step": 1301 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 3.739162237209817e-05, |
|
"loss": 0.0115, |
|
"step": 1302 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 3.737352925979782e-05, |
|
"loss": 0.0072, |
|
"step": 1303 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 3.735542755973064e-05, |
|
"loss": 0.0074, |
|
"step": 1304 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 3.7337317284459975e-05, |
|
"loss": 0.0078, |
|
"step": 1305 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 3.7319198446555134e-05, |
|
"loss": 0.0126, |
|
"step": 1306 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 3.73010710585914e-05, |
|
"loss": 0.0102, |
|
"step": 1307 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 3.728293513314995e-05, |
|
"loss": 0.0035, |
|
"step": 1308 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 3.7264790682817926e-05, |
|
"loss": 0.0044, |
|
"step": 1309 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 3.724663772018834e-05, |
|
"loss": 0.0052, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 3.7228476257860145e-05, |
|
"loss": 0.006, |
|
"step": 1311 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 3.721030630843819e-05, |
|
"loss": 0.012, |
|
"step": 1312 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 3.719212788453321e-05, |
|
"loss": 0.0032, |
|
"step": 1313 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 3.717394099876182e-05, |
|
"loss": 0.0048, |
|
"step": 1314 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 3.71557456637465e-05, |
|
"loss": 0.0051, |
|
"step": 1315 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 3.713754189211561e-05, |
|
"loss": 0.0042, |
|
"step": 1316 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 3.711932969650337e-05, |
|
"loss": 0.0024, |
|
"step": 1317 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 3.7101109089549815e-05, |
|
"loss": 0.0082, |
|
"step": 1318 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 3.708288008390083e-05, |
|
"loss": 0.0049, |
|
"step": 1319 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 3.706464269220817e-05, |
|
"loss": 0.0041, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 3.704639692712936e-05, |
|
"loss": 0.0022, |
|
"step": 1321 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 3.702814280132775e-05, |
|
"loss": 0.0036, |
|
"step": 1322 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 3.700988032747249e-05, |
|
"loss": 0.0096, |
|
"step": 1323 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 3.6991609518238547e-05, |
|
"loss": 0.0061, |
|
"step": 1324 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 3.697333038630665e-05, |
|
"loss": 0.0112, |
|
"step": 1325 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 3.6955042944363304e-05, |
|
"loss": 0.0027, |
|
"step": 1326 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 3.69367472051008e-05, |
|
"loss": 0.015, |
|
"step": 1327 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 3.691844318121717e-05, |
|
"loss": 0.0055, |
|
"step": 1328 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 3.690013088541619e-05, |
|
"loss": 0.0032, |
|
"step": 1329 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 3.688181033040741e-05, |
|
"loss": 0.0033, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 3.686348152890608e-05, |
|
"loss": 0.0104, |
|
"step": 1331 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 3.684514449363318e-05, |
|
"loss": 0.0032, |
|
"step": 1332 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 3.6826799237315425e-05, |
|
"loss": 0.0085, |
|
"step": 1333 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 3.680844577268521e-05, |
|
"loss": 0.0044, |
|
"step": 1334 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 3.679008411248063e-05, |
|
"loss": 0.0073, |
|
"step": 1335 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 3.677171426944548e-05, |
|
"loss": 0.0053, |
|
"step": 1336 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 3.675333625632923e-05, |
|
"loss": 0.0046, |
|
"step": 1337 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 3.673495008588702e-05, |
|
"loss": 0.0075, |
|
"step": 1338 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 3.6716555770879654e-05, |
|
"loss": 0.0065, |
|
"step": 1339 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 3.669815332407358e-05, |
|
"loss": 0.0089, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 3.66797427582409e-05, |
|
"loss": 0.0107, |
|
"step": 1341 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 3.666132408615935e-05, |
|
"loss": 0.0029, |
|
"step": 1342 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 3.664289732061228e-05, |
|
"loss": 0.0095, |
|
"step": 1343 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 3.662446247438867e-05, |
|
"loss": 0.0029, |
|
"step": 1344 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 3.660601956028309e-05, |
|
"loss": 0.0071, |
|
"step": 1345 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 3.658756859109574e-05, |
|
"loss": 0.0084, |
|
"step": 1346 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 3.656910957963241e-05, |
|
"loss": 0.0038, |
|
"step": 1347 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 3.655064253870441e-05, |
|
"loss": 0.0077, |
|
"step": 1348 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 3.653216748112872e-05, |
|
"loss": 0.0098, |
|
"step": 1349 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 3.65136844197278e-05, |
|
"loss": 0.0066, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"eval_loss": 0.017896093428134918, |
|
"eval_runtime": 26.9269, |
|
"eval_samples_per_second": 22.283, |
|
"eval_steps_per_second": 2.785, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 3.6495193367329705e-05, |
|
"loss": 0.0075, |
|
"step": 1351 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 3.647669433676803e-05, |
|
"loss": 0.0054, |
|
"step": 1352 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 3.64581873408819e-05, |
|
"loss": 0.0053, |
|
"step": 1353 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 3.6439672392515975e-05, |
|
"loss": 0.0087, |
|
"step": 1354 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 3.642114950452046e-05, |
|
"loss": 0.0066, |
|
"step": 1355 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 3.6402618689751e-05, |
|
"loss": 0.0052, |
|
"step": 1356 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 3.6384079961068815e-05, |
|
"loss": 0.0104, |
|
"step": 1357 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 3.636553333134058e-05, |
|
"loss": 0.0023, |
|
"step": 1358 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 3.6346978813438464e-05, |
|
"loss": 0.0065, |
|
"step": 1359 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 3.63284164202401e-05, |
|
"loss": 0.009, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 3.630984616462861e-05, |
|
"loss": 0.0051, |
|
"step": 1361 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 3.629126805949255e-05, |
|
"loss": 0.0096, |
|
"step": 1362 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 3.627268211772592e-05, |
|
"loss": 0.0103, |
|
"step": 1363 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 3.625408835222819e-05, |
|
"loss": 0.0047, |
|
"step": 1364 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 3.623548677590424e-05, |
|
"loss": 0.0057, |
|
"step": 1365 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 3.621687740166436e-05, |
|
"loss": 0.0049, |
|
"step": 1366 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 3.619826024242427e-05, |
|
"loss": 0.0095, |
|
"step": 1367 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 3.6179635311105095e-05, |
|
"loss": 0.003, |
|
"step": 1368 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 3.6161002620633326e-05, |
|
"loss": 0.0056, |
|
"step": 1369 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 3.614236218394088e-05, |
|
"loss": 0.0024, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 3.612371401396502e-05, |
|
"loss": 0.0073, |
|
"step": 1371 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 3.61050581236484e-05, |
|
"loss": 0.0057, |
|
"step": 1372 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 3.608639452593899e-05, |
|
"loss": 0.0027, |
|
"step": 1373 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 3.606772323379017e-05, |
|
"loss": 0.0098, |
|
"step": 1374 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 3.60490442601606e-05, |
|
"loss": 0.0062, |
|
"step": 1375 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 3.6030357618014316e-05, |
|
"loss": 0.0064, |
|
"step": 1376 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 3.601166332032066e-05, |
|
"loss": 0.0078, |
|
"step": 1377 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 3.599296138005428e-05, |
|
"loss": 0.0041, |
|
"step": 1378 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 3.597425181019513e-05, |
|
"loss": 0.02, |
|
"step": 1379 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 3.595553462372848e-05, |
|
"loss": 0.0048, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 3.5936809833644855e-05, |
|
"loss": 0.0078, |
|
"step": 1381 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 3.591807745294008e-05, |
|
"loss": 0.007, |
|
"step": 1382 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 3.5899337494615245e-05, |
|
"loss": 0.0068, |
|
"step": 1383 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 3.5880589971676685e-05, |
|
"loss": 0.0056, |
|
"step": 1384 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 3.5861834897136006e-05, |
|
"loss": 0.0039, |
|
"step": 1385 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 3.584307228401004e-05, |
|
"loss": 0.0066, |
|
"step": 1386 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 3.582430214532085e-05, |
|
"loss": 0.0091, |
|
"step": 1387 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 3.580552449409573e-05, |
|
"loss": 0.0056, |
|
"step": 1388 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 3.57867393433672e-05, |
|
"loss": 0.0029, |
|
"step": 1389 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 3.576794670617295e-05, |
|
"loss": 0.0132, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 3.57491465955559e-05, |
|
"loss": 0.003, |
|
"step": 1391 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 3.573033902456414e-05, |
|
"loss": 0.0065, |
|
"step": 1392 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 3.571152400625094e-05, |
|
"loss": 0.0053, |
|
"step": 1393 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 3.5692701553674735e-05, |
|
"loss": 0.0117, |
|
"step": 1394 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 3.567387167989913e-05, |
|
"loss": 0.0065, |
|
"step": 1395 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 3.565503439799289e-05, |
|
"loss": 0.0032, |
|
"step": 1396 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 3.563618972102988e-05, |
|
"loss": 0.0055, |
|
"step": 1397 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 3.561733766208915e-05, |
|
"loss": 0.0058, |
|
"step": 1398 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 3.5598478234254817e-05, |
|
"loss": 0.0046, |
|
"step": 1399 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 3.5579611450616165e-05, |
|
"loss": 0.0087, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"eval_loss": 0.018370576202869415, |
|
"eval_runtime": 27.0067, |
|
"eval_samples_per_second": 22.217, |
|
"eval_steps_per_second": 2.777, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 3.556073732426757e-05, |
|
"loss": 0.0067, |
|
"step": 1401 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 3.554185586830848e-05, |
|
"loss": 0.0091, |
|
"step": 1402 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 3.5522967095843447e-05, |
|
"loss": 0.0023, |
|
"step": 1403 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 3.55040710199821e-05, |
|
"loss": 0.0098, |
|
"step": 1404 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 3.548516765383914e-05, |
|
"loss": 0.0046, |
|
"step": 1405 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 3.5466257010534324e-05, |
|
"loss": 0.0076, |
|
"step": 1406 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 3.5447339103192454e-05, |
|
"loss": 0.0069, |
|
"step": 1407 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 3.5428413944943386e-05, |
|
"loss": 0.0071, |
|
"step": 1408 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 3.540948154892201e-05, |
|
"loss": 0.0057, |
|
"step": 1409 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 3.53905419282682e-05, |
|
"loss": 0.0069, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 3.537159509612693e-05, |
|
"loss": 0.0087, |
|
"step": 1411 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 3.5352641065648065e-05, |
|
"loss": 0.0089, |
|
"step": 1412 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 3.5333679849986564e-05, |
|
"loss": 0.0055, |
|
"step": 1413 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 3.5314711462302315e-05, |
|
"loss": 0.0041, |
|
"step": 1414 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 3.529573591576022e-05, |
|
"loss": 0.0047, |
|
"step": 1415 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 3.527675322353011e-05, |
|
"loss": 0.0047, |
|
"step": 1416 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 3.5257763398786824e-05, |
|
"loss": 0.0136, |
|
"step": 1417 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 3.523876645471011e-05, |
|
"loss": 0.0034, |
|
"step": 1418 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 3.521976240448468e-05, |
|
"loss": 0.0048, |
|
"step": 1419 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 3.520075126130016e-05, |
|
"loss": 0.0043, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 3.518173303835111e-05, |
|
"loss": 0.0119, |
|
"step": 1421 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 3.5162707748837014e-05, |
|
"loss": 0.0059, |
|
"step": 1422 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 3.514367540596224e-05, |
|
"loss": 0.0089, |
|
"step": 1423 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 3.512463602293608e-05, |
|
"loss": 0.0123, |
|
"step": 1424 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 3.510558961297265e-05, |
|
"loss": 0.0036, |
|
"step": 1425 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 3.508653618929103e-05, |
|
"loss": 0.0084, |
|
"step": 1426 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 3.5067475765115104e-05, |
|
"loss": 0.0088, |
|
"step": 1427 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 3.5048408353673643e-05, |
|
"loss": 0.0056, |
|
"step": 1428 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 3.502933396820025e-05, |
|
"loss": 0.0053, |
|
"step": 1429 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 3.5010252621933374e-05, |
|
"loss": 0.0022, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 3.499116432811631e-05, |
|
"loss": 0.0057, |
|
"step": 1431 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 3.4972069099997164e-05, |
|
"loss": 0.0051, |
|
"step": 1432 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 3.4952966950828835e-05, |
|
"loss": 0.0185, |
|
"step": 1433 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 3.493385789386906e-05, |
|
"loss": 0.004, |
|
"step": 1434 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 3.4914741942380355e-05, |
|
"loss": 0.0042, |
|
"step": 1435 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 3.4895619109630014e-05, |
|
"loss": 0.0039, |
|
"step": 1436 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 3.4876489408890126e-05, |
|
"loss": 0.0089, |
|
"step": 1437 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 3.485735285343752e-05, |
|
"loss": 0.0048, |
|
"step": 1438 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 3.48382094565538e-05, |
|
"loss": 0.0121, |
|
"step": 1439 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 3.481905923152532e-05, |
|
"loss": 0.0073, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 3.479990219164316e-05, |
|
"loss": 0.0054, |
|
"step": 1441 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 3.478073835020315e-05, |
|
"loss": 0.007, |
|
"step": 1442 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 3.476156772050582e-05, |
|
"loss": 0.0022, |
|
"step": 1443 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 3.474239031585642e-05, |
|
"loss": 0.0069, |
|
"step": 1444 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 3.4723206149564913e-05, |
|
"loss": 0.0012, |
|
"step": 1445 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 3.4704015234945926e-05, |
|
"loss": 0.004, |
|
"step": 1446 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 3.46848175853188e-05, |
|
"loss": 0.0152, |
|
"step": 1447 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 3.466561321400753e-05, |
|
"loss": 0.0083, |
|
"step": 1448 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 3.464640213434079e-05, |
|
"loss": 0.0082, |
|
"step": 1449 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 3.46271843596519e-05, |
|
"loss": 0.0074, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"eval_loss": 0.018056336790323257, |
|
"eval_runtime": 26.9975, |
|
"eval_samples_per_second": 22.224, |
|
"eval_steps_per_second": 2.778, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 3.460795990327883e-05, |
|
"loss": 0.0028, |
|
"step": 1451 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 3.45887287785642e-05, |
|
"loss": 0.0061, |
|
"step": 1452 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 3.456949099885522e-05, |
|
"loss": 0.0045, |
|
"step": 1453 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 3.4550246577503776e-05, |
|
"loss": 0.0057, |
|
"step": 1454 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 3.453099552786631e-05, |
|
"loss": 0.0026, |
|
"step": 1455 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 3.451173786330389e-05, |
|
"loss": 0.0109, |
|
"step": 1456 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 3.4492473597182184e-05, |
|
"loss": 0.0085, |
|
"step": 1457 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 3.447320274287142e-05, |
|
"loss": 0.0028, |
|
"step": 1458 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 3.4453925313746405e-05, |
|
"loss": 0.0106, |
|
"step": 1459 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 3.443464132318653e-05, |
|
"loss": 0.0076, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 3.44153507845757e-05, |
|
"loss": 0.0041, |
|
"step": 1461 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 3.4396053711302395e-05, |
|
"loss": 0.0089, |
|
"step": 1462 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 3.4376750116759625e-05, |
|
"loss": 0.0057, |
|
"step": 1463 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 3.435744001434492e-05, |
|
"loss": 0.0045, |
|
"step": 1464 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 3.4338123417460336e-05, |
|
"loss": 0.0054, |
|
"step": 1465 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 3.431880033951242e-05, |
|
"loss": 0.0089, |
|
"step": 1466 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 3.4299470793912235e-05, |
|
"loss": 0.0024, |
|
"step": 1467 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 3.4280134794075326e-05, |
|
"loss": 0.0133, |
|
"step": 1468 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 3.4260792353421706e-05, |
|
"loss": 0.0093, |
|
"step": 1469 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 3.424144348537589e-05, |
|
"loss": 0.0157, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 3.4222088203366816e-05, |
|
"loss": 0.0074, |
|
"step": 1471 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 3.420272652082789e-05, |
|
"loss": 0.0065, |
|
"step": 1472 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 3.418335845119697e-05, |
|
"loss": 0.0052, |
|
"step": 1473 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 3.416398400791633e-05, |
|
"loss": 0.0058, |
|
"step": 1474 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 3.414460320443269e-05, |
|
"loss": 0.0061, |
|
"step": 1475 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 3.412521605419715e-05, |
|
"loss": 0.0065, |
|
"step": 1476 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 3.410582257066524e-05, |
|
"loss": 0.0041, |
|
"step": 1477 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 3.408642276729689e-05, |
|
"loss": 0.0029, |
|
"step": 1478 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 3.40670166575564e-05, |
|
"loss": 0.0077, |
|
"step": 1479 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 3.4047604254912445e-05, |
|
"loss": 0.0082, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 3.402818557283809e-05, |
|
"loss": 0.0059, |
|
"step": 1481 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 3.400876062481074e-05, |
|
"loss": 0.0035, |
|
"step": 1482 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 3.3989329424312145e-05, |
|
"loss": 0.0081, |
|
"step": 1483 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 3.3969891984828406e-05, |
|
"loss": 0.0043, |
|
"step": 1484 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 3.395044831984996e-05, |
|
"loss": 0.0128, |
|
"step": 1485 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 3.3930998442871534e-05, |
|
"loss": 0.0035, |
|
"step": 1486 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 3.391154236739221e-05, |
|
"loss": 0.0044, |
|
"step": 1487 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 3.389208010691535e-05, |
|
"loss": 0.0034, |
|
"step": 1488 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 3.387261167494858e-05, |
|
"loss": 0.0149, |
|
"step": 1489 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 3.385313708500386e-05, |
|
"loss": 0.0063, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 3.383365635059741e-05, |
|
"loss": 0.0048, |
|
"step": 1491 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 3.381416948524968e-05, |
|
"loss": 0.0049, |
|
"step": 1492 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 3.379467650248542e-05, |
|
"loss": 0.0093, |
|
"step": 1493 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 3.3775177415833605e-05, |
|
"loss": 0.0066, |
|
"step": 1494 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 3.375567223882744e-05, |
|
"loss": 0.0048, |
|
"step": 1495 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 3.373616098500437e-05, |
|
"loss": 0.0063, |
|
"step": 1496 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 3.371664366790606e-05, |
|
"loss": 0.0061, |
|
"step": 1497 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 3.369712030107836e-05, |
|
"loss": 0.0131, |
|
"step": 1498 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 3.367759089807134e-05, |
|
"loss": 0.0087, |
|
"step": 1499 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 3.365805547243927e-05, |
|
"loss": 0.0057, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"eval_loss": 0.016999540850520134, |
|
"eval_runtime": 27.3863, |
|
"eval_samples_per_second": 21.909, |
|
"eval_steps_per_second": 2.739, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 3.363851403774057e-05, |
|
"loss": 0.0049, |
|
"step": 1501 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 3.3618966607537835e-05, |
|
"loss": 0.0053, |
|
"step": 1502 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 3.359941319539785e-05, |
|
"loss": 0.0099, |
|
"step": 1503 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 3.357985381489153e-05, |
|
"loss": 0.0086, |
|
"step": 1504 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 3.356028847959392e-05, |
|
"loss": 0.0027, |
|
"step": 1505 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 3.354071720308423e-05, |
|
"loss": 0.019, |
|
"step": 1506 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"learning_rate": 3.352113999894576e-05, |
|
"loss": 0.0058, |
|
"step": 1507 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 3.3501556880765944e-05, |
|
"loss": 0.0087, |
|
"step": 1508 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 3.348196786213633e-05, |
|
"loss": 0.0071, |
|
"step": 1509 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 3.3462372956652546e-05, |
|
"loss": 0.0052, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 3.96, |
|
"learning_rate": 3.3442772177914284e-05, |
|
"loss": 0.0075, |
|
"step": 1511 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 3.342316553952536e-05, |
|
"loss": 0.0046, |
|
"step": 1512 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 3.340355305509363e-05, |
|
"loss": 0.0041, |
|
"step": 1513 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 3.3383934738231e-05, |
|
"loss": 0.004, |
|
"step": 1514 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"learning_rate": 3.336431060255344e-05, |
|
"loss": 0.0061, |
|
"step": 1515 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 3.334468066168095e-05, |
|
"loss": 0.0115, |
|
"step": 1516 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 3.332504492923757e-05, |
|
"loss": 0.0092, |
|
"step": 1517 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 3.3305403418851344e-05, |
|
"loss": 0.0095, |
|
"step": 1518 |
|
}, |
|
{ |
|
"epoch": 3.98, |
|
"learning_rate": 3.3285756144154334e-05, |
|
"loss": 0.0035, |
|
"step": 1519 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 3.3266103118782606e-05, |
|
"loss": 0.0118, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 3.3246444356376205e-05, |
|
"loss": 0.0047, |
|
"step": 1521 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 3.3226779870579183e-05, |
|
"loss": 0.0047, |
|
"step": 1522 |
|
}, |
|
{ |
|
"epoch": 3.99, |
|
"learning_rate": 3.3207109675039527e-05, |
|
"loss": 0.006, |
|
"step": 1523 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.3187433783409216e-05, |
|
"loss": 0.01, |
|
"step": 1524 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.316775220934417e-05, |
|
"loss": 0.0064, |
|
"step": 1525 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.314806496650427e-05, |
|
"loss": 0.0043, |
|
"step": 1526 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 3.312837206855331e-05, |
|
"loss": 0.0027, |
|
"step": 1527 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 3.310867352915901e-05, |
|
"loss": 0.0024, |
|
"step": 1528 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 3.3088969361993004e-05, |
|
"loss": 0.0027, |
|
"step": 1529 |
|
}, |
|
{ |
|
"epoch": 4.01, |
|
"learning_rate": 3.306925958073087e-05, |
|
"loss": 0.0025, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 3.3049544199052034e-05, |
|
"loss": 0.0056, |
|
"step": 1531 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 3.3029823230639824e-05, |
|
"loss": 0.0048, |
|
"step": 1532 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 3.301009668918145e-05, |
|
"loss": 0.0033, |
|
"step": 1533 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 3.299036458836801e-05, |
|
"loss": 0.0052, |
|
"step": 1534 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 3.29706269418944e-05, |
|
"loss": 0.0073, |
|
"step": 1535 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 3.295088376345944e-05, |
|
"loss": 0.0031, |
|
"step": 1536 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 3.293113506676573e-05, |
|
"loss": 0.0046, |
|
"step": 1537 |
|
}, |
|
{ |
|
"epoch": 4.03, |
|
"learning_rate": 3.2911380865519736e-05, |
|
"loss": 0.0071, |
|
"step": 1538 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 3.289162117343173e-05, |
|
"loss": 0.0039, |
|
"step": 1539 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 3.28718560042158e-05, |
|
"loss": 0.0078, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 3.285208537158982e-05, |
|
"loss": 0.0034, |
|
"step": 1541 |
|
}, |
|
{ |
|
"epoch": 4.04, |
|
"learning_rate": 3.283230928927548e-05, |
|
"loss": 0.0018, |
|
"step": 1542 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 3.2812527770998234e-05, |
|
"loss": 0.006, |
|
"step": 1543 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 3.279274083048731e-05, |
|
"loss": 0.0045, |
|
"step": 1544 |
|
}, |
|
{ |
|
"epoch": 4.05, |
|
"learning_rate": 3.277294848147572e-05, |
|
"loss": 0.006, |
|
"step": 1545 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 3.27531507377002e-05, |
|
"loss": 0.0074, |
|
"step": 1546 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 3.273334761290125e-05, |
|
"loss": 0.005, |
|
"step": 1547 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 3.2713539120823095e-05, |
|
"loss": 0.0047, |
|
"step": 1548 |
|
}, |
|
{ |
|
"epoch": 4.06, |
|
"learning_rate": 3.269372527521369e-05, |
|
"loss": 0.0049, |
|
"step": 1549 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 3.2673906089824716e-05, |
|
"loss": 0.0053, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"eval_loss": 0.019064469262957573, |
|
"eval_runtime": 27.2336, |
|
"eval_samples_per_second": 22.032, |
|
"eval_steps_per_second": 2.754, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 3.265408157841153e-05, |
|
"loss": 0.0027, |
|
"step": 1551 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 3.263425175473323e-05, |
|
"loss": 0.0076, |
|
"step": 1552 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 3.2614416632552544e-05, |
|
"loss": 0.005, |
|
"step": 1553 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 3.259457622563593e-05, |
|
"loss": 0.0031, |
|
"step": 1554 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 3.257473054775348e-05, |
|
"loss": 0.0021, |
|
"step": 1555 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 3.2554879612678966e-05, |
|
"loss": 0.0081, |
|
"step": 1556 |
|
}, |
|
{ |
|
"epoch": 4.08, |
|
"learning_rate": 3.2535023434189796e-05, |
|
"loss": 0.0099, |
|
"step": 1557 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 3.251516202606701e-05, |
|
"loss": 0.0057, |
|
"step": 1558 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 3.249529540209531e-05, |
|
"loss": 0.0017, |
|
"step": 1559 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 3.247542357606299e-05, |
|
"loss": 0.0034, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 3.245554656176194e-05, |
|
"loss": 0.0028, |
|
"step": 1561 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 3.243566437298769e-05, |
|
"loss": 0.0049, |
|
"step": 1562 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 3.241577702353934e-05, |
|
"loss": 0.0081, |
|
"step": 1563 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 3.239588452721958e-05, |
|
"loss": 0.0017, |
|
"step": 1564 |
|
}, |
|
{ |
|
"epoch": 4.1, |
|
"learning_rate": 3.237598689783465e-05, |
|
"loss": 0.0027, |
|
"step": 1565 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 3.2356084149194374e-05, |
|
"loss": 0.0008, |
|
"step": 1566 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 3.233617629511214e-05, |
|
"loss": 0.0041, |
|
"step": 1567 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 3.2316263349404844e-05, |
|
"loss": 0.0007, |
|
"step": 1568 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 3.229634532589296e-05, |
|
"loss": 0.0066, |
|
"step": 1569 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 3.227642223840043e-05, |
|
"loss": 0.0006, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 3.225649410075476e-05, |
|
"loss": 0.0075, |
|
"step": 1571 |
|
}, |
|
{ |
|
"epoch": 4.12, |
|
"learning_rate": 3.2236560926786953e-05, |
|
"loss": 0.0025, |
|
"step": 1572 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 3.221662273033148e-05, |
|
"loss": 0.0067, |
|
"step": 1573 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 3.219667952522632e-05, |
|
"loss": 0.0006, |
|
"step": 1574 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 3.217673132531293e-05, |
|
"loss": 0.0045, |
|
"step": 1575 |
|
}, |
|
{ |
|
"epoch": 4.13, |
|
"learning_rate": 3.215677814443622e-05, |
|
"loss": 0.0013, |
|
"step": 1576 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 3.2136819996444564e-05, |
|
"loss": 0.0011, |
|
"step": 1577 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 3.211685689518978e-05, |
|
"loss": 0.0042, |
|
"step": 1578 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 3.209688885452715e-05, |
|
"loss": 0.0027, |
|
"step": 1579 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 3.207691588831534e-05, |
|
"loss": 0.0051, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 3.205693801041646e-05, |
|
"loss": 0.0045, |
|
"step": 1581 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 3.203695523469603e-05, |
|
"loss": 0.0042, |
|
"step": 1582 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 3.201696757502296e-05, |
|
"loss": 0.0041, |
|
"step": 1583 |
|
}, |
|
{ |
|
"epoch": 4.15, |
|
"learning_rate": 3.199697504526955e-05, |
|
"loss": 0.0016, |
|
"step": 1584 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 3.19769776593115e-05, |
|
"loss": 0.0034, |
|
"step": 1585 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 3.195697543102785e-05, |
|
"loss": 0.0028, |
|
"step": 1586 |
|
}, |
|
{ |
|
"epoch": 4.16, |
|
"learning_rate": 3.193696837430102e-05, |
|
"loss": 0.0078, |
|
"step": 1587 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 3.191695650301678e-05, |
|
"loss": 0.0091, |
|
"step": 1588 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 3.189693983106423e-05, |
|
"loss": 0.0057, |
|
"step": 1589 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 3.1876918372335825e-05, |
|
"loss": 0.0047, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 3.185689214072731e-05, |
|
"loss": 0.0057, |
|
"step": 1591 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 3.183686115013776e-05, |
|
"loss": 0.0083, |
|
"step": 1592 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 3.1816825414469564e-05, |
|
"loss": 0.0049, |
|
"step": 1593 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 3.179678494762839e-05, |
|
"loss": 0.0094, |
|
"step": 1594 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 3.1776739763523187e-05, |
|
"loss": 0.0057, |
|
"step": 1595 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 3.175668987606619e-05, |
|
"loss": 0.0031, |
|
"step": 1596 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 3.173663529917289e-05, |
|
"loss": 0.0039, |
|
"step": 1597 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 3.1716576046762034e-05, |
|
"loss": 0.0025, |
|
"step": 1598 |
|
}, |
|
{ |
|
"epoch": 4.19, |
|
"learning_rate": 3.169651213275562e-05, |
|
"loss": 0.0014, |
|
"step": 1599 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 3.167644357107886e-05, |
|
"loss": 0.0051, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"eval_loss": 0.017935335636138916, |
|
"eval_runtime": 27.0809, |
|
"eval_samples_per_second": 22.156, |
|
"eval_steps_per_second": 2.769, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 3.165637037566022e-05, |
|
"loss": 0.0028, |
|
"step": 1601 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 3.1636292560431366e-05, |
|
"loss": 0.0024, |
|
"step": 1602 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 3.161621013932719e-05, |
|
"loss": 0.0034, |
|
"step": 1603 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 3.159612312628574e-05, |
|
"loss": 0.0092, |
|
"step": 1604 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 3.157603153524828e-05, |
|
"loss": 0.0101, |
|
"step": 1605 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 3.155593538015926e-05, |
|
"loss": 0.0052, |
|
"step": 1606 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 3.153583467496628e-05, |
|
"loss": 0.0022, |
|
"step": 1607 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 3.151572943362009e-05, |
|
"loss": 0.0018, |
|
"step": 1608 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 3.149561967007462e-05, |
|
"loss": 0.0024, |
|
"step": 1609 |
|
}, |
|
{ |
|
"epoch": 4.22, |
|
"learning_rate": 3.147550539828691e-05, |
|
"loss": 0.0042, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 3.1455386632217144e-05, |
|
"loss": 0.0039, |
|
"step": 1611 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 3.143526338582861e-05, |
|
"loss": 0.0036, |
|
"step": 1612 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 3.141513567308772e-05, |
|
"loss": 0.002, |
|
"step": 1613 |
|
}, |
|
{ |
|
"epoch": 4.23, |
|
"learning_rate": 3.139500350796397e-05, |
|
"loss": 0.0035, |
|
"step": 1614 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 3.137486690442998e-05, |
|
"loss": 0.0019, |
|
"step": 1615 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 3.135472587646142e-05, |
|
"loss": 0.0027, |
|
"step": 1616 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 3.133458043803703e-05, |
|
"loss": 0.0041, |
|
"step": 1617 |
|
}, |
|
{ |
|
"epoch": 4.24, |
|
"learning_rate": 3.131443060313862e-05, |
|
"loss": 0.0066, |
|
"step": 1618 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 3.129427638575106e-05, |
|
"loss": 0.0076, |
|
"step": 1619 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 3.1274117799862244e-05, |
|
"loss": 0.0048, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 3.125395485946311e-05, |
|
"loss": 0.0074, |
|
"step": 1621 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 3.1233787578547616e-05, |
|
"loss": 0.0043, |
|
"step": 1622 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 3.1213615971112727e-05, |
|
"loss": 0.0046, |
|
"step": 1623 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 3.1193440051158426e-05, |
|
"loss": 0.0027, |
|
"step": 1624 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 3.1173259832687655e-05, |
|
"loss": 0.0045, |
|
"step": 1625 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 3.1153075329706385e-05, |
|
"loss": 0.0084, |
|
"step": 1626 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 3.113288655622352e-05, |
|
"loss": 0.0009, |
|
"step": 1627 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 3.111269352625097e-05, |
|
"loss": 0.0029, |
|
"step": 1628 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 3.1092496253803546e-05, |
|
"loss": 0.0014, |
|
"step": 1629 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 3.107229475289905e-05, |
|
"loss": 0.0058, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 3.10520890375582e-05, |
|
"loss": 0.0025, |
|
"step": 1631 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 3.103187912180464e-05, |
|
"loss": 0.0031, |
|
"step": 1632 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 3.1011665019664924e-05, |
|
"loss": 0.0067, |
|
"step": 1633 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 3.0991446745168516e-05, |
|
"loss": 0.0007, |
|
"step": 1634 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 3.097122431234779e-05, |
|
"loss": 0.0121, |
|
"step": 1635 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 3.095099773523798e-05, |
|
"loss": 0.0041, |
|
"step": 1636 |
|
}, |
|
{ |
|
"epoch": 4.29, |
|
"learning_rate": 3.093076702787722e-05, |
|
"loss": 0.0051, |
|
"step": 1637 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 3.0910532204306484e-05, |
|
"loss": 0.0077, |
|
"step": 1638 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 3.089029327856963e-05, |
|
"loss": 0.0017, |
|
"step": 1639 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 3.087005026471335e-05, |
|
"loss": 0.007, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 3.0849803176787186e-05, |
|
"loss": 0.0007, |
|
"step": 1641 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 3.082955202884347e-05, |
|
"loss": 0.0042, |
|
"step": 1642 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 3.0809296834937404e-05, |
|
"loss": 0.0086, |
|
"step": 1643 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 3.078903760912695e-05, |
|
"loss": 0.0098, |
|
"step": 1644 |
|
}, |
|
{ |
|
"epoch": 4.31, |
|
"learning_rate": 3.0768774365472916e-05, |
|
"loss": 0.0047, |
|
"step": 1645 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 3.074850711803886e-05, |
|
"loss": 0.0065, |
|
"step": 1646 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 3.072823588089112e-05, |
|
"loss": 0.0065, |
|
"step": 1647 |
|
}, |
|
{ |
|
"epoch": 4.32, |
|
"learning_rate": 3.070796066809883e-05, |
|
"loss": 0.0054, |
|
"step": 1648 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 3.0687681493733874e-05, |
|
"loss": 0.0067, |
|
"step": 1649 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 3.066739837187088e-05, |
|
"loss": 0.0051, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"eval_loss": 0.018013320863246918, |
|
"eval_runtime": 26.893, |
|
"eval_samples_per_second": 22.311, |
|
"eval_steps_per_second": 2.789, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 3.0647111316587194e-05, |
|
"loss": 0.0043, |
|
"step": 1651 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 3.062682034196293e-05, |
|
"loss": 0.0057, |
|
"step": 1652 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 3.060652546208091e-05, |
|
"loss": 0.004, |
|
"step": 1653 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 3.0586226691026656e-05, |
|
"loss": 0.001, |
|
"step": 1654 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 3.0565924042888386e-05, |
|
"loss": 0.0034, |
|
"step": 1655 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 3.054561753175704e-05, |
|
"loss": 0.0055, |
|
"step": 1656 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 3.05253071717262e-05, |
|
"loss": 0.0052, |
|
"step": 1657 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 3.0504992976892166e-05, |
|
"loss": 0.0046, |
|
"step": 1658 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 3.048467496135384e-05, |
|
"loss": 0.0019, |
|
"step": 1659 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 3.046435313921282e-05, |
|
"loss": 0.0025, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 3.0444027524573338e-05, |
|
"loss": 0.0086, |
|
"step": 1661 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 3.042369813154225e-05, |
|
"loss": 0.0089, |
|
"step": 1662 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 3.040336497422904e-05, |
|
"loss": 0.0048, |
|
"step": 1663 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 3.03830280667458e-05, |
|
"loss": 0.0034, |
|
"step": 1664 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 3.036268742320722e-05, |
|
"loss": 0.005, |
|
"step": 1665 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 3.0342343057730605e-05, |
|
"loss": 0.0029, |
|
"step": 1666 |
|
}, |
|
{ |
|
"epoch": 4.37, |
|
"learning_rate": 3.0321994984435826e-05, |
|
"loss": 0.0045, |
|
"step": 1667 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 3.030164321744532e-05, |
|
"loss": 0.0028, |
|
"step": 1668 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 3.0281287770884098e-05, |
|
"loss": 0.0031, |
|
"step": 1669 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 3.0260928658879727e-05, |
|
"loss": 0.0039, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 4.38, |
|
"learning_rate": 3.0240565895562335e-05, |
|
"loss": 0.0027, |
|
"step": 1671 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 3.0220199495064526e-05, |
|
"loss": 0.0065, |
|
"step": 1672 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 3.0199829471521494e-05, |
|
"loss": 0.0027, |
|
"step": 1673 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 3.017945583907092e-05, |
|
"loss": 0.0013, |
|
"step": 1674 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 3.0159078611852983e-05, |
|
"loss": 0.0021, |
|
"step": 1675 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 3.0138697804010357e-05, |
|
"loss": 0.006, |
|
"step": 1676 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 3.0118313429688215e-05, |
|
"loss": 0.0087, |
|
"step": 1677 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 3.0097925503034202e-05, |
|
"loss": 0.0054, |
|
"step": 1678 |
|
}, |
|
{ |
|
"epoch": 4.4, |
|
"learning_rate": 3.007753403819842e-05, |
|
"loss": 0.0041, |
|
"step": 1679 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 3.005713904933344e-05, |
|
"loss": 0.0051, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 3.0036740550594254e-05, |
|
"loss": 0.0072, |
|
"step": 1681 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 3.001633855613832e-05, |
|
"loss": 0.0029, |
|
"step": 1682 |
|
}, |
|
{ |
|
"epoch": 4.41, |
|
"learning_rate": 2.9995933080125504e-05, |
|
"loss": 0.0052, |
|
"step": 1683 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 2.9975524136718097e-05, |
|
"loss": 0.0021, |
|
"step": 1684 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 2.995511174008078e-05, |
|
"loss": 0.0037, |
|
"step": 1685 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 2.9934695904380655e-05, |
|
"loss": 0.0031, |
|
"step": 1686 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 2.9914276643787192e-05, |
|
"loss": 0.0061, |
|
"step": 1687 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 2.989385397247226e-05, |
|
"loss": 0.0053, |
|
"step": 1688 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 2.9873427904610057e-05, |
|
"loss": 0.0024, |
|
"step": 1689 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 2.9852998454377172e-05, |
|
"loss": 0.0032, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 2.9832565635952537e-05, |
|
"loss": 0.004, |
|
"step": 1691 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 2.9812129463517413e-05, |
|
"loss": 0.0046, |
|
"step": 1692 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 2.979168995125538e-05, |
|
"loss": 0.0023, |
|
"step": 1693 |
|
}, |
|
{ |
|
"epoch": 4.44, |
|
"learning_rate": 2.9771247113352357e-05, |
|
"loss": 0.0051, |
|
"step": 1694 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 2.9750800963996566e-05, |
|
"loss": 0.0043, |
|
"step": 1695 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 2.9730351517378514e-05, |
|
"loss": 0.0019, |
|
"step": 1696 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 2.9709898787691014e-05, |
|
"loss": 0.0029, |
|
"step": 1697 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 2.968944278912914e-05, |
|
"loss": 0.0052, |
|
"step": 1698 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 2.9668983535890248e-05, |
|
"loss": 0.0018, |
|
"step": 1699 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 2.964852104217395e-05, |
|
"loss": 0.0008, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"eval_loss": 0.01821320876479149, |
|
"eval_runtime": 27.1196, |
|
"eval_samples_per_second": 22.124, |
|
"eval_steps_per_second": 2.766, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 2.9628055322182102e-05, |
|
"loss": 0.0023, |
|
"step": 1701 |
|
}, |
|
{ |
|
"epoch": 4.46, |
|
"learning_rate": 2.9607586390118807e-05, |
|
"loss": 0.0142, |
|
"step": 1702 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 2.9587114260190386e-05, |
|
"loss": 0.0073, |
|
"step": 1703 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 2.956663894660539e-05, |
|
"loss": 0.0025, |
|
"step": 1704 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 2.9546160463574592e-05, |
|
"loss": 0.002, |
|
"step": 1705 |
|
}, |
|
{ |
|
"epoch": 4.47, |
|
"learning_rate": 2.952567882531092e-05, |
|
"loss": 0.0022, |
|
"step": 1706 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 2.950519404602954e-05, |
|
"loss": 0.002, |
|
"step": 1707 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 2.9484706139947765e-05, |
|
"loss": 0.0044, |
|
"step": 1708 |
|
}, |
|
{ |
|
"epoch": 4.48, |
|
"learning_rate": 2.946421512128511e-05, |
|
"loss": 0.0024, |
|
"step": 1709 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 2.9443721004263223e-05, |
|
"loss": 0.0009, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 2.9423223803105903e-05, |
|
"loss": 0.0042, |
|
"step": 1711 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 2.9402723532039113e-05, |
|
"loss": 0.0034, |
|
"step": 1712 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 2.9382220205290923e-05, |
|
"loss": 0.0105, |
|
"step": 1713 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 2.936171383709154e-05, |
|
"loss": 0.0011, |
|
"step": 1714 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 2.9341204441673266e-05, |
|
"loss": 0.0064, |
|
"step": 1715 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 2.932069203327051e-05, |
|
"loss": 0.0078, |
|
"step": 1716 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 2.930017662611978e-05, |
|
"loss": 0.007, |
|
"step": 1717 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 2.9279658234459667e-05, |
|
"loss": 0.0023, |
|
"step": 1718 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 2.9259136872530812e-05, |
|
"loss": 0.0099, |
|
"step": 1719 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 2.9238612554575938e-05, |
|
"loss": 0.0072, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 2.921808529483982e-05, |
|
"loss": 0.0012, |
|
"step": 1721 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 2.9197555107569264e-05, |
|
"loss": 0.0046, |
|
"step": 1722 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 2.91770220070131e-05, |
|
"loss": 0.0008, |
|
"step": 1723 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 2.91564860074222e-05, |
|
"loss": 0.0063, |
|
"step": 1724 |
|
}, |
|
{ |
|
"epoch": 4.52, |
|
"learning_rate": 2.913594712304945e-05, |
|
"loss": 0.0055, |
|
"step": 1725 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 2.9115405368149717e-05, |
|
"loss": 0.0026, |
|
"step": 1726 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 2.9094860756979876e-05, |
|
"loss": 0.0017, |
|
"step": 1727 |
|
}, |
|
{ |
|
"epoch": 4.53, |
|
"learning_rate": 2.907431330379877e-05, |
|
"loss": 0.0021, |
|
"step": 1728 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 2.9053763022867235e-05, |
|
"loss": 0.0065, |
|
"step": 1729 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 2.9033209928448056e-05, |
|
"loss": 0.0078, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 2.9012654034805975e-05, |
|
"loss": 0.0014, |
|
"step": 1731 |
|
}, |
|
{ |
|
"epoch": 4.54, |
|
"learning_rate": 2.899209535620767e-05, |
|
"loss": 0.0056, |
|
"step": 1732 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 2.897153390692176e-05, |
|
"loss": 0.002, |
|
"step": 1733 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 2.8950969701218783e-05, |
|
"loss": 0.0025, |
|
"step": 1734 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 2.8930402753371194e-05, |
|
"loss": 0.0037, |
|
"step": 1735 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 2.8909833077653347e-05, |
|
"loss": 0.0109, |
|
"step": 1736 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 2.8889260688341486e-05, |
|
"loss": 0.0015, |
|
"step": 1737 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 2.8868685599713746e-05, |
|
"loss": 0.0037, |
|
"step": 1738 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 2.884810782605014e-05, |
|
"loss": 0.0038, |
|
"step": 1739 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 2.8827527381632523e-05, |
|
"loss": 0.0096, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 2.880694428074463e-05, |
|
"loss": 0.0049, |
|
"step": 1741 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 2.878635853767202e-05, |
|
"loss": 0.0011, |
|
"step": 1742 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 2.8765770166702087e-05, |
|
"loss": 0.0049, |
|
"step": 1743 |
|
}, |
|
{ |
|
"epoch": 4.57, |
|
"learning_rate": 2.8745179182124077e-05, |
|
"loss": 0.0062, |
|
"step": 1744 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 2.8724585598229005e-05, |
|
"loss": 0.0057, |
|
"step": 1745 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 2.8703989429309723e-05, |
|
"loss": 0.0076, |
|
"step": 1746 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 2.868339068966086e-05, |
|
"loss": 0.0008, |
|
"step": 1747 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 2.8662789393578852e-05, |
|
"loss": 0.0009, |
|
"step": 1748 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 2.864218555536188e-05, |
|
"loss": 0.003, |
|
"step": 1749 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 2.86215791893099e-05, |
|
"loss": 0.0052, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"eval_loss": 0.01769648864865303, |
|
"eval_runtime": 27.02, |
|
"eval_samples_per_second": 22.206, |
|
"eval_steps_per_second": 2.776, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 4.59, |
|
"learning_rate": 2.8600970309724628e-05, |
|
"loss": 0.0095, |
|
"step": 1751 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 2.8580358930909535e-05, |
|
"loss": 0.0029, |
|
"step": 1752 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 2.8559745067169798e-05, |
|
"loss": 0.0071, |
|
"step": 1753 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 2.8539128732812336e-05, |
|
"loss": 0.0044, |
|
"step": 1754 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 2.851850994214579e-05, |
|
"loss": 0.0079, |
|
"step": 1755 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 2.8497888709480493e-05, |
|
"loss": 0.001, |
|
"step": 1756 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 2.8477265049128467e-05, |
|
"loss": 0.0043, |
|
"step": 1757 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 2.8456638975403428e-05, |
|
"loss": 0.0037, |
|
"step": 1758 |
|
}, |
|
{ |
|
"epoch": 4.61, |
|
"learning_rate": 2.843601050262078e-05, |
|
"loss": 0.0063, |
|
"step": 1759 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 2.8415379645097574e-05, |
|
"loss": 0.0054, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 2.8394746417152522e-05, |
|
"loss": 0.0037, |
|
"step": 1761 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 2.837411083310598e-05, |
|
"loss": 0.0023, |
|
"step": 1762 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 2.8353472907279933e-05, |
|
"loss": 0.0046, |
|
"step": 1763 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 2.833283265399801e-05, |
|
"loss": 0.0054, |
|
"step": 1764 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 2.831219008758544e-05, |
|
"loss": 0.0081, |
|
"step": 1765 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 2.8291545222369047e-05, |
|
"loss": 0.0061, |
|
"step": 1766 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 2.8270898072677282e-05, |
|
"loss": 0.003, |
|
"step": 1767 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 2.8250248652840155e-05, |
|
"loss": 0.0015, |
|
"step": 1768 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 2.822959697718927e-05, |
|
"loss": 0.0024, |
|
"step": 1769 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 2.8208943060057775e-05, |
|
"loss": 0.006, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 2.8188286915780392e-05, |
|
"loss": 0.0043, |
|
"step": 1771 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 2.8167628558693375e-05, |
|
"loss": 0.0066, |
|
"step": 1772 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 2.814696800313454e-05, |
|
"loss": 0.0058, |
|
"step": 1773 |
|
}, |
|
{ |
|
"epoch": 4.65, |
|
"learning_rate": 2.8126305263443187e-05, |
|
"loss": 0.0026, |
|
"step": 1774 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 2.810564035396017e-05, |
|
"loss": 0.0019, |
|
"step": 1775 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 2.8084973289027827e-05, |
|
"loss": 0.0042, |
|
"step": 1776 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 2.806430408299e-05, |
|
"loss": 0.0061, |
|
"step": 1777 |
|
}, |
|
{ |
|
"epoch": 4.66, |
|
"learning_rate": 2.8043632750192027e-05, |
|
"loss": 0.0027, |
|
"step": 1778 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 2.8022959304980695e-05, |
|
"loss": 0.0054, |
|
"step": 1779 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 2.8002283761704273e-05, |
|
"loss": 0.0028, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 2.7981606134712502e-05, |
|
"loss": 0.0049, |
|
"step": 1781 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 2.7960926438356545e-05, |
|
"loss": 0.0037, |
|
"step": 1782 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 2.7940244686989004e-05, |
|
"loss": 0.0033, |
|
"step": 1783 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 2.7919560894963913e-05, |
|
"loss": 0.0058, |
|
"step": 1784 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 2.7898875076636732e-05, |
|
"loss": 0.0074, |
|
"step": 1785 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 2.7878187246364318e-05, |
|
"loss": 0.0038, |
|
"step": 1786 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 2.7857497418504912e-05, |
|
"loss": 0.0057, |
|
"step": 1787 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 2.783680560741816e-05, |
|
"loss": 0.0042, |
|
"step": 1788 |
|
}, |
|
{ |
|
"epoch": 4.69, |
|
"learning_rate": 2.7816111827465073e-05, |
|
"loss": 0.0026, |
|
"step": 1789 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 2.7795416093008048e-05, |
|
"loss": 0.0073, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 2.7774718418410813e-05, |
|
"loss": 0.0083, |
|
"step": 1791 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 2.7754018818038445e-05, |
|
"loss": 0.0077, |
|
"step": 1792 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 2.7733317306257377e-05, |
|
"loss": 0.0064, |
|
"step": 1793 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 2.7712613897435357e-05, |
|
"loss": 0.0043, |
|
"step": 1794 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 2.7691908605941458e-05, |
|
"loss": 0.0023, |
|
"step": 1795 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 2.7671201446146033e-05, |
|
"loss": 0.0025, |
|
"step": 1796 |
|
}, |
|
{ |
|
"epoch": 4.71, |
|
"learning_rate": 2.765049243242076e-05, |
|
"loss": 0.0121, |
|
"step": 1797 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 2.762978157913859e-05, |
|
"loss": 0.0101, |
|
"step": 1798 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 2.7609068900673767e-05, |
|
"loss": 0.0025, |
|
"step": 1799 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 2.7588354411401777e-05, |
|
"loss": 0.0017, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"eval_loss": 0.017755404114723206, |
|
"eval_runtime": 27.037, |
|
"eval_samples_per_second": 22.192, |
|
"eval_steps_per_second": 2.774, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 4.72, |
|
"learning_rate": 2.756763812569938e-05, |
|
"loss": 0.0054, |
|
"step": 1801 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 2.7546920057944582e-05, |
|
"loss": 0.0008, |
|
"step": 1802 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 2.7526200222516617e-05, |
|
"loss": 0.0053, |
|
"step": 1803 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 2.750547863379595e-05, |
|
"loss": 0.0084, |
|
"step": 1804 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 2.748475530616427e-05, |
|
"loss": 0.0023, |
|
"step": 1805 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 2.7464030254004458e-05, |
|
"loss": 0.0043, |
|
"step": 1806 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 2.7443303491700607e-05, |
|
"loss": 0.0047, |
|
"step": 1807 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 2.742257503363799e-05, |
|
"loss": 0.0022, |
|
"step": 1808 |
|
}, |
|
{ |
|
"epoch": 4.74, |
|
"learning_rate": 2.7401844894203056e-05, |
|
"loss": 0.0093, |
|
"step": 1809 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 2.738111308778342e-05, |
|
"loss": 0.0093, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 2.7360379628767857e-05, |
|
"loss": 0.002, |
|
"step": 1811 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 2.73396445315463e-05, |
|
"loss": 0.004, |
|
"step": 1812 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 2.731890781050979e-05, |
|
"loss": 0.0051, |
|
"step": 1813 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 2.7298169480050516e-05, |
|
"loss": 0.0054, |
|
"step": 1814 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 2.727742955456179e-05, |
|
"loss": 0.0146, |
|
"step": 1815 |
|
}, |
|
{ |
|
"epoch": 4.76, |
|
"learning_rate": 2.7256688048438016e-05, |
|
"loss": 0.0082, |
|
"step": 1816 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 2.7235944976074695e-05, |
|
"loss": 0.0039, |
|
"step": 1817 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 2.721520035186843e-05, |
|
"loss": 0.0016, |
|
"step": 1818 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 2.7194454190216877e-05, |
|
"loss": 0.0014, |
|
"step": 1819 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 2.7173706505518797e-05, |
|
"loss": 0.0034, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 2.715295731217396e-05, |
|
"loss": 0.0015, |
|
"step": 1821 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 2.7132206624583217e-05, |
|
"loss": 0.0042, |
|
"step": 1822 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 2.711145445714845e-05, |
|
"loss": 0.003, |
|
"step": 1823 |
|
}, |
|
{ |
|
"epoch": 4.78, |
|
"learning_rate": 2.7090700824272557e-05, |
|
"loss": 0.001, |
|
"step": 1824 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 2.7069945740359478e-05, |
|
"loss": 0.0029, |
|
"step": 1825 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 2.7049189219814125e-05, |
|
"loss": 0.006, |
|
"step": 1826 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 2.7028431277042425e-05, |
|
"loss": 0.0035, |
|
"step": 1827 |
|
}, |
|
{ |
|
"epoch": 4.79, |
|
"learning_rate": 2.7007671926451305e-05, |
|
"loss": 0.0018, |
|
"step": 1828 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 2.6986911182448648e-05, |
|
"loss": 0.0025, |
|
"step": 1829 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 2.6966149059443312e-05, |
|
"loss": 0.0013, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 4.8, |
|
"learning_rate": 2.6945385571845113e-05, |
|
"loss": 0.0074, |
|
"step": 1831 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 2.6924620734064815e-05, |
|
"loss": 0.003, |
|
"step": 1832 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 2.6903854560514118e-05, |
|
"loss": 0.0058, |
|
"step": 1833 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 2.6883087065605644e-05, |
|
"loss": 0.0067, |
|
"step": 1834 |
|
}, |
|
{ |
|
"epoch": 4.81, |
|
"learning_rate": 2.6862318263752946e-05, |
|
"loss": 0.0024, |
|
"step": 1835 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 2.6841548169370457e-05, |
|
"loss": 0.0046, |
|
"step": 1836 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 2.6820776796873543e-05, |
|
"loss": 0.0048, |
|
"step": 1837 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 2.680000416067844e-05, |
|
"loss": 0.0032, |
|
"step": 1838 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 2.6779230275202243e-05, |
|
"loss": 0.0069, |
|
"step": 1839 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 2.6758455154862937e-05, |
|
"loss": 0.0041, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 2.673767881407937e-05, |
|
"loss": 0.0076, |
|
"step": 1841 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 2.6716901267271215e-05, |
|
"loss": 0.0033, |
|
"step": 1842 |
|
}, |
|
{ |
|
"epoch": 4.83, |
|
"learning_rate": 2.669612252885899e-05, |
|
"loss": 0.0044, |
|
"step": 1843 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 2.6675342613264048e-05, |
|
"loss": 0.0027, |
|
"step": 1844 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 2.6654561534908544e-05, |
|
"loss": 0.0028, |
|
"step": 1845 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 2.6633779308215467e-05, |
|
"loss": 0.004, |
|
"step": 1846 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 2.6612995947608566e-05, |
|
"loss": 0.0031, |
|
"step": 1847 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 2.6592211467512408e-05, |
|
"loss": 0.005, |
|
"step": 1848 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 2.657142588235232e-05, |
|
"loss": 0.0021, |
|
"step": 1849 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"learning_rate": 2.6550639206554405e-05, |
|
"loss": 0.0098, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 4.85, |
|
"eval_loss": 0.016997970640659332, |
|
"eval_runtime": 26.9876, |
|
"eval_samples_per_second": 22.232, |
|
"eval_steps_per_second": 2.779, |
|
"step": 1850 |
|
} |
|
], |
|
"logging_steps": 1.0, |
|
"max_steps": 3810, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 10, |
|
"save_steps": 50, |
|
"total_flos": 2.1215423471026176e+18, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |