{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5,
"eval_steps": 500,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"Batch Sum": 52.814453125,
"accuracy": 0.546875,
"epoch": 0,
"step": 0
},
{
"Batch Sum": 52.524169921875,
"accuracy": 0.40625,
"epoch": 0,
"step": 0
},
{
"epoch": 0.0025,
"grad_norm": 12.003690719604492,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.714,
"step": 1
},
{
"Batch Sum": 52.9404296875,
"accuracy": 0.515625,
"epoch": 0.0025,
"step": 1
},
{
"Batch Sum": 48.3984375,
"accuracy": 0.625,
"epoch": 0.0025,
"step": 1
},
{
"epoch": 0.005,
"grad_norm": 14.317296981811523,
"learning_rate": 5.000000000000001e-07,
"loss": 0.6849,
"step": 2
},
{
"Batch Sum": 52.79443359375,
"accuracy": 0.484375,
"epoch": 0.005,
"step": 2
},
{
"Batch Sum": 53.980224609375,
"accuracy": 0.578125,
"epoch": 0.005,
"step": 2
},
{
"epoch": 0.0075,
"grad_norm": 21.00113296508789,
"learning_rate": 7.5e-07,
"loss": 0.7106,
"step": 3
},
{
"Batch Sum": 53.60833740234375,
"accuracy": 0.453125,
"epoch": 0.0075,
"step": 3
},
{
"Batch Sum": 52.896728515625,
"accuracy": 0.578125,
"epoch": 0.0075,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 13.431326866149902,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.6978,
"step": 4
},
{
"Batch Sum": 54.1025390625,
"accuracy": 0.65625,
"epoch": 0.01,
"step": 4
},
{
"Batch Sum": 54.279052734375,
"accuracy": 0.53125,
"epoch": 0.01,
"step": 4
},
{
"epoch": 0.0125,
"grad_norm": 13.951983451843262,
"learning_rate": 1.25e-06,
"loss": 0.6883,
"step": 5
},
{
"Batch Sum": 53.18408203125,
"accuracy": 0.578125,
"epoch": 0.0125,
"step": 5
},
{
"Batch Sum": 49.6630859375,
"accuracy": 0.53125,
"epoch": 0.0125,
"step": 5
},
{
"epoch": 0.015,
"grad_norm": 11.490130424499512,
"learning_rate": 1.5e-06,
"loss": 0.6818,
"step": 6
},
{
"Batch Sum": 54.36865234375,
"accuracy": 0.546875,
"epoch": 0.015,
"step": 6
},
{
"Batch Sum": 53.27685546875,
"accuracy": 0.515625,
"epoch": 0.015,
"step": 6
},
{
"epoch": 0.0175,
"grad_norm": 18.882749557495117,
"learning_rate": 1.75e-06,
"loss": 0.7198,
"step": 7
},
{
"Batch Sum": 58.3974609375,
"accuracy": 0.421875,
"epoch": 0.0175,
"step": 7
},
{
"Batch Sum": 56.97265625,
"accuracy": 0.6875,
"epoch": 0.0175,
"step": 7
},
{
"epoch": 0.02,
"grad_norm": 12.89813232421875,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.6877,
"step": 8
},
{
"Batch Sum": 56.07861328125,
"accuracy": 0.625,
"epoch": 0.02,
"step": 8
},
{
"Batch Sum": 57.08154296875,
"accuracy": 0.609375,
"epoch": 0.02,
"step": 8
},
{
"epoch": 0.0225,
"grad_norm": 16.434814453125,
"learning_rate": 2.25e-06,
"loss": 0.6739,
"step": 9
},
{
"Batch Sum": 59.09326171875,
"accuracy": 0.453125,
"epoch": 0.0225,
"step": 9
},
{
"Batch Sum": 61.3369140625,
"accuracy": 0.546875,
"epoch": 0.0225,
"step": 9
},
{
"epoch": 0.025,
"grad_norm": 13.62090015411377,
"learning_rate": 2.5e-06,
"loss": 0.6856,
"step": 10
},
{
"Batch Sum": 70.0703125,
"accuracy": 0.609375,
"epoch": 0.025,
"step": 10
},
{
"Batch Sum": 64.751953125,
"accuracy": 0.578125,
"epoch": 0.025,
"step": 10
},
{
"epoch": 0.0275,
"grad_norm": 13.072342872619629,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.6546,
"step": 11
},
{
"Batch Sum": 73.357421875,
"accuracy": 0.609375,
"epoch": 0.0275,
"step": 11
},
{
"Batch Sum": 72.185546875,
"accuracy": 0.578125,
"epoch": 0.0275,
"step": 11
},
{
"epoch": 0.03,
"grad_norm": 13.209249496459961,
"learning_rate": 3e-06,
"loss": 0.6519,
"step": 12
},
{
"Batch Sum": 77.0615234375,
"accuracy": 0.703125,
"epoch": 0.03,
"step": 12
},
{
"Batch Sum": 78.0577392578125,
"accuracy": 0.703125,
"epoch": 0.03,
"step": 12
},
{
"epoch": 0.0325,
"grad_norm": 12.592546463012695,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.6092,
"step": 13
},
{
"Batch Sum": 91.876953125,
"accuracy": 0.625,
"epoch": 0.0325,
"step": 13
},
{
"Batch Sum": 87.7666015625,
"accuracy": 0.6875,
"epoch": 0.0325,
"step": 13
},
{
"epoch": 0.035,
"grad_norm": 14.43427848815918,
"learning_rate": 3.5e-06,
"loss": 0.601,
"step": 14
},
{
"Batch Sum": 97.5859375,
"accuracy": 0.65625,
"epoch": 0.035,
"step": 14
},
{
"Batch Sum": 107.59765625,
"accuracy": 0.71875,
"epoch": 0.035,
"step": 14
},
{
"epoch": 0.0375,
"grad_norm": 12.276965141296387,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.6045,
"step": 15
},
{
"Batch Sum": 111.8828125,
"accuracy": 0.59375,
"epoch": 0.0375,
"step": 15
},
{
"Batch Sum": 116.759765625,
"accuracy": 0.6875,
"epoch": 0.0375,
"step": 15
},
{
"epoch": 0.04,
"grad_norm": 12.269201278686523,
"learning_rate": 4.000000000000001e-06,
"loss": 0.6262,
"step": 16
},
{
"Batch Sum": 120.9140625,
"accuracy": 0.625,
"epoch": 0.04,
"step": 16
},
{
"Batch Sum": 125.150390625,
"accuracy": 0.703125,
"epoch": 0.04,
"step": 16
},
{
"epoch": 0.0425,
"grad_norm": 12.212128639221191,
"learning_rate": 4.25e-06,
"loss": 0.6265,
"step": 17
},
{
"Batch Sum": 136.623046875,
"accuracy": 0.71875,
"epoch": 0.0425,
"step": 17
},
{
"Batch Sum": 133.275390625,
"accuracy": 0.640625,
"epoch": 0.0425,
"step": 17
},
{
"epoch": 0.045,
"grad_norm": 17.11771583557129,
"learning_rate": 4.5e-06,
"loss": 0.596,
"step": 18
},
{
"Batch Sum": 155.64453125,
"accuracy": 0.71875,
"epoch": 0.045,
"step": 18
},
{
"Batch Sum": 152.97265625,
"accuracy": 0.671875,
"epoch": 0.045,
"step": 18
},
{
"epoch": 0.0475,
"grad_norm": 17.395545959472656,
"learning_rate": 4.75e-06,
"loss": 0.6295,
"step": 19
},
{
"Batch Sum": 173.0703125,
"accuracy": 0.65625,
"epoch": 0.0475,
"step": 19
},
{
"Batch Sum": 173.05078125,
"accuracy": 0.671875,
"epoch": 0.0475,
"step": 19
},
{
"epoch": 0.05,
"grad_norm": 24.041831970214844,
"learning_rate": 5e-06,
"loss": 0.6095,
"step": 20
},
{
"Batch Sum": 191.30859375,
"accuracy": 0.765625,
"epoch": 0.05,
"step": 20
},
{
"Batch Sum": 196.74609375,
"accuracy": 0.6875,
"epoch": 0.05,
"step": 20
},
{
"epoch": 0.0525,
"grad_norm": 13.471965789794922,
"learning_rate": 4.986842105263158e-06,
"loss": 0.527,
"step": 21
},
{
"Batch Sum": 205.40625,
"accuracy": 0.703125,
"epoch": 0.0525,
"step": 21
},
{
"Batch Sum": 204.26953125,
"accuracy": 0.703125,
"epoch": 0.0525,
"step": 21
},
{
"epoch": 0.055,
"grad_norm": 22.169654846191406,
"learning_rate": 4.973684210526316e-06,
"loss": 0.6532,
"step": 22
},
{
"Batch Sum": 208.29296875,
"accuracy": 0.578125,
"epoch": 0.055,
"step": 22
},
{
"Batch Sum": 208.50390625,
"accuracy": 0.75,
"epoch": 0.055,
"step": 22
},
{
"epoch": 0.0575,
"grad_norm": 21.459922790527344,
"learning_rate": 4.960526315789474e-06,
"loss": 0.6456,
"step": 23
},
{
"Batch Sum": 207.7421875,
"accuracy": 0.78125,
"epoch": 0.0575,
"step": 23
},
{
"Batch Sum": 205.7421875,
"accuracy": 0.625,
"epoch": 0.0575,
"step": 23
},
{
"epoch": 0.06,
"grad_norm": 12.531883239746094,
"learning_rate": 4.947368421052632e-06,
"loss": 0.5219,
"step": 24
},
{
"Batch Sum": 211.6640625,
"accuracy": 0.71875,
"epoch": 0.06,
"step": 24
},
{
"Batch Sum": 208.45703125,
"accuracy": 0.703125,
"epoch": 0.06,
"step": 24
},
{
"epoch": 0.0625,
"grad_norm": 16.02347755432129,
"learning_rate": 4.9342105263157895e-06,
"loss": 0.5895,
"step": 25
},
{
"Batch Sum": 213.375,
"accuracy": 0.640625,
"epoch": 0.0625,
"step": 25
},
{
"Batch Sum": 203.58203125,
"accuracy": 0.703125,
"epoch": 0.0625,
"step": 25
},
{
"epoch": 0.065,
"grad_norm": 12.315845489501953,
"learning_rate": 4.921052631578948e-06,
"loss": 0.5732,
"step": 26
},
{
"Batch Sum": 202.23828125,
"accuracy": 0.734375,
"epoch": 0.065,
"step": 26
},
{
"Batch Sum": 197.6953125,
"accuracy": 0.703125,
"epoch": 0.065,
"step": 26
},
{
"epoch": 0.0675,
"grad_norm": 10.496895790100098,
"learning_rate": 4.907894736842106e-06,
"loss": 0.5467,
"step": 27
},
{
"Batch Sum": 183.37890625,
"accuracy": 0.703125,
"epoch": 0.0675,
"step": 27
},
{
"Batch Sum": 185.86328125,
"accuracy": 0.75,
"epoch": 0.0675,
"step": 27
},
{
"epoch": 0.07,
"grad_norm": 10.82455825805664,
"learning_rate": 4.894736842105264e-06,
"loss": 0.5028,
"step": 28
},
{
"Batch Sum": 179.2158203125,
"accuracy": 0.71875,
"epoch": 0.07,
"step": 28
},
{
"Batch Sum": 179.328125,
"accuracy": 0.734375,
"epoch": 0.07,
"step": 28
},
{
"epoch": 0.0725,
"grad_norm": 13.485640525817871,
"learning_rate": 4.881578947368422e-06,
"loss": 0.5516,
"step": 29
},
{
"Batch Sum": 174.9296875,
"accuracy": 0.703125,
"epoch": 0.0725,
"step": 29
},
{
"Batch Sum": 172.44921875,
"accuracy": 0.734375,
"epoch": 0.0725,
"step": 29
},
{
"epoch": 0.075,
"grad_norm": 11.573375701904297,
"learning_rate": 4.8684210526315795e-06,
"loss": 0.5523,
"step": 30
},
{
"Batch Sum": 170.3046875,
"accuracy": 0.671875,
"epoch": 0.075,
"step": 30
},
{
"Batch Sum": 174.54296875,
"accuracy": 0.703125,
"epoch": 0.075,
"step": 30
},
{
"epoch": 0.0775,
"grad_norm": 10.271368026733398,
"learning_rate": 4.855263157894737e-06,
"loss": 0.5549,
"step": 31
},
{
"Batch Sum": 168.15625,
"accuracy": 0.75,
"epoch": 0.0775,
"step": 31
},
{
"Batch Sum": 169.49609375,
"accuracy": 0.671875,
"epoch": 0.0775,
"step": 31
},
{
"epoch": 0.08,
"grad_norm": 9.423661231994629,
"learning_rate": 4.842105263157895e-06,
"loss": 0.5279,
"step": 32
},
{
"Batch Sum": 166.53125,
"accuracy": 0.765625,
"epoch": 0.08,
"step": 32
},
{
"Batch Sum": 169.66015625,
"accuracy": 0.75,
"epoch": 0.08,
"step": 32
},
{
"epoch": 0.0825,
"grad_norm": 11.485343933105469,
"learning_rate": 4.828947368421053e-06,
"loss": 0.5109,
"step": 33
},
{
"Batch Sum": 169.38671875,
"accuracy": 0.6875,
"epoch": 0.0825,
"step": 33
},
{
"Batch Sum": 172.9375,
"accuracy": 0.796875,
"epoch": 0.0825,
"step": 33
},
{
"epoch": 0.085,
"grad_norm": 10.612264633178711,
"learning_rate": 4.815789473684211e-06,
"loss": 0.555,
"step": 34
},
{
"Batch Sum": 179.78515625,
"accuracy": 0.71875,
"epoch": 0.085,
"step": 34
},
{
"Batch Sum": 173.041015625,
"accuracy": 0.75,
"epoch": 0.085,
"step": 34
},
{
"epoch": 0.0875,
"grad_norm": 9.249446868896484,
"learning_rate": 4.802631578947369e-06,
"loss": 0.528,
"step": 35
},
{
"Batch Sum": 169.671875,
"accuracy": 0.765625,
"epoch": 0.0875,
"step": 35
},
{
"Batch Sum": 184.578125,
"accuracy": 0.734375,
"epoch": 0.0875,
"step": 35
},
{
"epoch": 0.09,
"grad_norm": 8.817283630371094,
"learning_rate": 4.789473684210527e-06,
"loss": 0.452,
"step": 36
},
{
"Batch Sum": 182.33984375,
"accuracy": 0.765625,
"epoch": 0.09,
"step": 36
},
{
"Batch Sum": 190.103515625,
"accuracy": 0.75,
"epoch": 0.09,
"step": 36
},
{
"epoch": 0.0925,
"grad_norm": 18.01405906677246,
"learning_rate": 4.7763157894736844e-06,
"loss": 0.5748,
"step": 37
},
{
"Batch Sum": 192.9296875,
"accuracy": 0.796875,
"epoch": 0.0925,
"step": 37
},
{
"Batch Sum": 195.75390625,
"accuracy": 0.734375,
"epoch": 0.0925,
"step": 37
},
{
"epoch": 0.095,
"grad_norm": 10.098393440246582,
"learning_rate": 4.763157894736842e-06,
"loss": 0.4995,
"step": 38
},
{
"Batch Sum": 199.21484375,
"accuracy": 0.75,
"epoch": 0.095,
"step": 38
},
{
"Batch Sum": 204.1640625,
"accuracy": 0.703125,
"epoch": 0.095,
"step": 38
},
{
"epoch": 0.0975,
"grad_norm": 13.419852256774902,
"learning_rate": 4.75e-06,
"loss": 0.5398,
"step": 39
},
{
"Batch Sum": 209.2578125,
"accuracy": 0.71875,
"epoch": 0.0975,
"step": 39
},
{
"Batch Sum": 213.6796875,
"accuracy": 0.640625,
"epoch": 0.0975,
"step": 39
},
{
"epoch": 0.1,
"grad_norm": 12.879420280456543,
"learning_rate": 4.736842105263158e-06,
"loss": 0.5389,
"step": 40
},
{
"Batch Sum": 220.0625,
"accuracy": 0.734375,
"epoch": 0.1,
"step": 40
},
{
"Batch Sum": 214.107421875,
"accuracy": 0.703125,
"epoch": 0.1,
"step": 40
},
{
"epoch": 0.1025,
"grad_norm": 10.593208312988281,
"learning_rate": 4.723684210526316e-06,
"loss": 0.4675,
"step": 41
},
{
"Batch Sum": 225.80078125,
"accuracy": 0.796875,
"epoch": 0.1025,
"step": 41
},
{
"Batch Sum": 218.39453125,
"accuracy": 0.765625,
"epoch": 0.1025,
"step": 41
},
{
"epoch": 0.105,
"grad_norm": 10.301755905151367,
"learning_rate": 4.710526315789474e-06,
"loss": 0.4833,
"step": 42
},
{
"Batch Sum": 234.8203125,
"accuracy": 0.765625,
"epoch": 0.105,
"step": 42
},
{
"Batch Sum": 229.140625,
"accuracy": 0.734375,
"epoch": 0.105,
"step": 42
},
{
"epoch": 0.1075,
"grad_norm": 11.743901252746582,
"learning_rate": 4.697368421052632e-06,
"loss": 0.5026,
"step": 43
},
{
"Batch Sum": 226.81640625,
"accuracy": 0.734375,
"epoch": 0.1075,
"step": 43
},
{
"Batch Sum": 225.296875,
"accuracy": 0.75,
"epoch": 0.1075,
"step": 43
},
{
"epoch": 0.11,
"grad_norm": 13.593162536621094,
"learning_rate": 4.68421052631579e-06,
"loss": 0.5254,
"step": 44
},
{
"Batch Sum": 227.609375,
"accuracy": 0.703125,
"epoch": 0.11,
"step": 44
},
{
"Batch Sum": 216.0625,
"accuracy": 0.765625,
"epoch": 0.11,
"step": 44
},
{
"epoch": 0.1125,
"grad_norm": 11.014060974121094,
"learning_rate": 4.671052631578948e-06,
"loss": 0.4679,
"step": 45
},
{
"Batch Sum": 227.234375,
"accuracy": 0.640625,
"epoch": 0.1125,
"step": 45
},
{
"Batch Sum": 222.55078125,
"accuracy": 0.734375,
"epoch": 0.1125,
"step": 45
},
{
"epoch": 0.115,
"grad_norm": 14.091309547424316,
"learning_rate": 4.657894736842106e-06,
"loss": 0.5738,
"step": 46
},
{
"Batch Sum": 217.4609375,
"accuracy": 0.6875,
"epoch": 0.115,
"step": 46
},
{
"Batch Sum": 224.85546875,
"accuracy": 0.671875,
"epoch": 0.115,
"step": 46
},
{
"epoch": 0.1175,
"grad_norm": 12.615903854370117,
"learning_rate": 4.6447368421052635e-06,
"loss": 0.5464,
"step": 47
},
{
"Batch Sum": 211.90625,
"accuracy": 0.75,
"epoch": 0.1175,
"step": 47
},
{
"Batch Sum": 207.115234375,
"accuracy": 0.734375,
"epoch": 0.1175,
"step": 47
},
{
"epoch": 0.12,
"grad_norm": 10.995285987854004,
"learning_rate": 4.631578947368421e-06,
"loss": 0.4911,
"step": 48
},
{
"Batch Sum": 198.1484375,
"accuracy": 0.734375,
"epoch": 0.12,
"step": 48
},
{
"Batch Sum": 195.65234375,
"accuracy": 0.765625,
"epoch": 0.12,
"step": 48
},
{
"epoch": 0.1225,
"grad_norm": 12.166397094726562,
"learning_rate": 4.618421052631579e-06,
"loss": 0.4656,
"step": 49
},
{
"Batch Sum": 192.26953125,
"accuracy": 0.703125,
"epoch": 0.1225,
"step": 49
},
{
"Batch Sum": 187.31640625,
"accuracy": 0.78125,
"epoch": 0.1225,
"step": 49
},
{
"epoch": 0.125,
"grad_norm": 12.223002433776855,
"learning_rate": 4.605263157894737e-06,
"loss": 0.5363,
"step": 50
},
{
"Batch Sum": 192.75390625,
"accuracy": 0.796875,
"epoch": 0.125,
"step": 50
},
{
"Batch Sum": 182.095703125,
"accuracy": 0.734375,
"epoch": 0.125,
"step": 50
},
{
"epoch": 0.1275,
"grad_norm": 10.82153606414795,
"learning_rate": 4.592105263157895e-06,
"loss": 0.4713,
"step": 51
},
{
"Batch Sum": 187.1015625,
"accuracy": 0.71875,
"epoch": 0.1275,
"step": 51
},
{
"Batch Sum": 185.41796875,
"accuracy": 0.703125,
"epoch": 0.1275,
"step": 51
},
{
"epoch": 0.13,
"grad_norm": 13.1001558303833,
"learning_rate": 4.578947368421053e-06,
"loss": 0.5496,
"step": 52
},
{
"Batch Sum": 199.91015625,
"accuracy": 0.765625,
"epoch": 0.13,
"step": 52
},
{
"Batch Sum": 202.24609375,
"accuracy": 0.734375,
"epoch": 0.13,
"step": 52
},
{
"epoch": 0.1325,
"grad_norm": 11.745606422424316,
"learning_rate": 4.565789473684211e-06,
"loss": 0.4841,
"step": 53
},
{
"Batch Sum": 197.15625,
"accuracy": 0.765625,
"epoch": 0.1325,
"step": 53
},
{
"Batch Sum": 200.75,
"accuracy": 0.71875,
"epoch": 0.1325,
"step": 53
},
{
"epoch": 0.135,
"grad_norm": 12.604297637939453,
"learning_rate": 4.552631578947369e-06,
"loss": 0.4502,
"step": 54
},
{
"Batch Sum": 214.22265625,
"accuracy": 0.6875,
"epoch": 0.135,
"step": 54
},
{
"Batch Sum": 207.84765625,
"accuracy": 0.78125,
"epoch": 0.135,
"step": 54
},
{
"epoch": 0.1375,
"grad_norm": 14.610075950622559,
"learning_rate": 4.539473684210527e-06,
"loss": 0.5449,
"step": 55
},
{
"Batch Sum": 221.5546875,
"accuracy": 0.875,
"epoch": 0.1375,
"step": 55
},
{
"Batch Sum": 236.11328125,
"accuracy": 0.765625,
"epoch": 0.1375,
"step": 55
},
{
"epoch": 0.14,
"grad_norm": 11.892423629760742,
"learning_rate": 4.526315789473685e-06,
"loss": 0.4002,
"step": 56
},
{
"Batch Sum": 246.265625,
"accuracy": 0.734375,
"epoch": 0.14,
"step": 56
},
{
"Batch Sum": 233.033203125,
"accuracy": 0.828125,
"epoch": 0.14,
"step": 56
},
{
"epoch": 0.1425,
"grad_norm": 13.1007719039917,
"learning_rate": 4.513157894736843e-06,
"loss": 0.4596,
"step": 57
},
{
"Batch Sum": 239.265625,
"accuracy": 0.78125,
"epoch": 0.1425,
"step": 57
},
{
"Batch Sum": 251.70703125,
"accuracy": 0.6875,
"epoch": 0.1425,
"step": 57
},
{
"epoch": 0.145,
"grad_norm": 14.813791275024414,
"learning_rate": 4.5e-06,
"loss": 0.5175,
"step": 58
},
{
"Batch Sum": 236.10546875,
"accuracy": 0.765625,
"epoch": 0.145,
"step": 58
},
{
"Batch Sum": 249.8046875,
"accuracy": 0.734375,
"epoch": 0.145,
"step": 58
},
{
"epoch": 0.1475,
"grad_norm": 14.486608505249023,
"learning_rate": 4.4868421052631584e-06,
"loss": 0.5035,
"step": 59
},
{
"Batch Sum": 241.12109375,
"accuracy": 0.78125,
"epoch": 0.1475,
"step": 59
},
{
"Batch Sum": 229.68359375,
"accuracy": 0.796875,
"epoch": 0.1475,
"step": 59
},
{
"epoch": 0.15,
"grad_norm": 12.024293899536133,
"learning_rate": 4.473684210526316e-06,
"loss": 0.4143,
"step": 60
},
{
"Batch Sum": 251.0,
"accuracy": 0.796875,
"epoch": 0.15,
"step": 60
},
{
"Batch Sum": 245.59765625,
"accuracy": 0.78125,
"epoch": 0.15,
"step": 60
},
{
"epoch": 0.1525,
"grad_norm": 11.371909141540527,
"learning_rate": 4.460526315789474e-06,
"loss": 0.3862,
"step": 61
},
{
"Batch Sum": 239.98828125,
"accuracy": 0.875,
"epoch": 0.1525,
"step": 61
},
{
"Batch Sum": 243.82421875,
"accuracy": 0.78125,
"epoch": 0.1525,
"step": 61
},
{
"epoch": 0.155,
"grad_norm": 11.827494621276855,
"learning_rate": 4.447368421052632e-06,
"loss": 0.3865,
"step": 62
},
{
"Batch Sum": 230.6640625,
"accuracy": 0.734375,
"epoch": 0.155,
"step": 62
},
{
"Batch Sum": 221.58984375,
"accuracy": 0.796875,
"epoch": 0.155,
"step": 62
},
{
"epoch": 0.1575,
"grad_norm": 11.711631774902344,
"learning_rate": 4.43421052631579e-06,
"loss": 0.4655,
"step": 63
},
{
"Batch Sum": 232.0703125,
"accuracy": 0.75,
"epoch": 0.1575,
"step": 63
},
{
"Batch Sum": 224.037109375,
"accuracy": 0.8125,
"epoch": 0.1575,
"step": 63
},
{
"epoch": 0.16,
"grad_norm": 11.687829971313477,
"learning_rate": 4.4210526315789476e-06,
"loss": 0.4065,
"step": 64
},
{
"Batch Sum": 232.53515625,
"accuracy": 0.671875,
"epoch": 0.16,
"step": 64
},
{
"Batch Sum": 225.68359375,
"accuracy": 0.734375,
"epoch": 0.16,
"step": 64
},
{
"epoch": 0.1625,
"grad_norm": 13.95382022857666,
"learning_rate": 4.407894736842105e-06,
"loss": 0.5705,
"step": 65
},
{
"Batch Sum": 237.1796875,
"accuracy": 0.859375,
"epoch": 0.1625,
"step": 65
},
{
"Batch Sum": 234.703125,
"accuracy": 0.84375,
"epoch": 0.1625,
"step": 65
},
{
"epoch": 0.165,
"grad_norm": 10.754307746887207,
"learning_rate": 4.394736842105263e-06,
"loss": 0.3935,
"step": 66
},
{
"Batch Sum": 216.4921875,
"accuracy": 0.8125,
"epoch": 0.165,
"step": 66
},
{
"Batch Sum": 238.1640625,
"accuracy": 0.828125,
"epoch": 0.165,
"step": 66
},
{
"epoch": 0.1675,
"grad_norm": 11.184181213378906,
"learning_rate": 4.381578947368421e-06,
"loss": 0.3747,
"step": 67
},
{
"Batch Sum": 211.755859375,
"accuracy": 0.828125,
"epoch": 0.1675,
"step": 67
},
{
"Batch Sum": 225.146484375,
"accuracy": 0.84375,
"epoch": 0.1675,
"step": 67
},
{
"epoch": 0.17,
"grad_norm": 10.842012405395508,
"learning_rate": 4.368421052631579e-06,
"loss": 0.3372,
"step": 68
},
{
"Batch Sum": 227.8291015625,
"accuracy": 0.796875,
"epoch": 0.17,
"step": 68
},
{
"Batch Sum": 214.9453125,
"accuracy": 0.78125,
"epoch": 0.17,
"step": 68
},
{
"epoch": 0.1725,
"grad_norm": 12.21465015411377,
"learning_rate": 4.3552631578947375e-06,
"loss": 0.4673,
"step": 69
},
{
"Batch Sum": 230.5546875,
"accuracy": 0.859375,
"epoch": 0.1725,
"step": 69
},
{
"Batch Sum": 229.95703125,
"accuracy": 0.796875,
"epoch": 0.1725,
"step": 69
},
{
"epoch": 0.175,
"grad_norm": 12.969797134399414,
"learning_rate": 4.342105263157895e-06,
"loss": 0.4088,
"step": 70
},
{
"Batch Sum": 224.419921875,
"accuracy": 0.8125,
"epoch": 0.175,
"step": 70
},
{
"Batch Sum": 221.15234375,
"accuracy": 0.859375,
"epoch": 0.175,
"step": 70
},
{
"epoch": 0.1775,
"grad_norm": 12.190828323364258,
"learning_rate": 4.328947368421053e-06,
"loss": 0.4531,
"step": 71
},
{
"Batch Sum": 212.4345703125,
"accuracy": 0.78125,
"epoch": 0.1775,
"step": 71
},
{
"Batch Sum": 209.00390625,
"accuracy": 0.6875,
"epoch": 0.1775,
"step": 71
},
{
"epoch": 0.18,
"grad_norm": 15.806035041809082,
"learning_rate": 4.315789473684211e-06,
"loss": 0.4743,
"step": 72
},
{
"Batch Sum": 203.03515625,
"accuracy": 0.84375,
"epoch": 0.18,
"step": 72
},
{
"Batch Sum": 223.71875,
"accuracy": 0.84375,
"epoch": 0.18,
"step": 72
},
{
"epoch": 0.1825,
"grad_norm": 11.65701961517334,
"learning_rate": 4.302631578947369e-06,
"loss": 0.3151,
"step": 73
},
{
"Batch Sum": 206.0234375,
"accuracy": 0.75,
"epoch": 0.1825,
"step": 73
},
{
"Batch Sum": 213.287109375,
"accuracy": 0.8125,
"epoch": 0.1825,
"step": 73
},
{
"epoch": 0.185,
"grad_norm": 14.198211669921875,
"learning_rate": 4.289473684210527e-06,
"loss": 0.4365,
"step": 74
},
{
"Batch Sum": 209.15234375,
"accuracy": 0.8125,
"epoch": 0.185,
"step": 74
},
{
"Batch Sum": 219.087890625,
"accuracy": 0.78125,
"epoch": 0.185,
"step": 74
},
{
"epoch": 0.1875,
"grad_norm": 13.063102722167969,
"learning_rate": 4.276315789473684e-06,
"loss": 0.4194,
"step": 75
},
{
"Batch Sum": 206.705078125,
"accuracy": 0.828125,
"epoch": 0.1875,
"step": 75
},
{
"Batch Sum": 208.162109375,
"accuracy": 0.828125,
"epoch": 0.1875,
"step": 75
},
{
"epoch": 0.19,
"grad_norm": 12.24936580657959,
"learning_rate": 4.2631578947368425e-06,
"loss": 0.4291,
"step": 76
},
{
"Batch Sum": 198.51953125,
"accuracy": 0.703125,
"epoch": 0.19,
"step": 76
},
{
"Batch Sum": 197.17578125,
"accuracy": 0.78125,
"epoch": 0.19,
"step": 76
},
{
"epoch": 0.1925,
"grad_norm": 21.39528465270996,
"learning_rate": 4.25e-06,
"loss": 0.4907,
"step": 77
},
{
"Batch Sum": 189.150390625,
"accuracy": 0.75,
"epoch": 0.1925,
"step": 77
},
{
"Batch Sum": 189.181640625,
"accuracy": 0.8125,
"epoch": 0.1925,
"step": 77
},
{
"epoch": 0.195,
"grad_norm": 12.943084716796875,
"learning_rate": 4.236842105263158e-06,
"loss": 0.4025,
"step": 78
},
{
"Batch Sum": 199.0009765625,
"accuracy": 0.75,
"epoch": 0.195,
"step": 78
},
{
"Batch Sum": 197.556640625,
"accuracy": 0.71875,
"epoch": 0.195,
"step": 78
},
{
"epoch": 0.1975,
"grad_norm": 14.870590209960938,
"learning_rate": 4.223684210526316e-06,
"loss": 0.4874,
"step": 79
},
{
"Batch Sum": 187.6875,
"accuracy": 0.875,
"epoch": 0.1975,
"step": 79
},
{
"Batch Sum": 191.8916015625,
"accuracy": 0.765625,
"epoch": 0.1975,
"step": 79
},
{
"epoch": 0.2,
"grad_norm": 15.593913078308105,
"learning_rate": 4.210526315789474e-06,
"loss": 0.3468,
"step": 80
},
{
"Batch Sum": 173.8515625,
"accuracy": 0.703125,
"epoch": 0.2,
"step": 80
},
{
"Batch Sum": 185.919921875,
"accuracy": 0.8125,
"epoch": 0.2,
"step": 80
},
{
"epoch": 0.2025,
"grad_norm": 11.869901657104492,
"learning_rate": 4.197368421052632e-06,
"loss": 0.4181,
"step": 81
},
{
"Batch Sum": 173.52197265625,
"accuracy": 0.828125,
"epoch": 0.2025,
"step": 81
},
{
"Batch Sum": 164.21044921875,
"accuracy": 0.84375,
"epoch": 0.2025,
"step": 81
},
{
"epoch": 0.205,
"grad_norm": 11.220733642578125,
"learning_rate": 4.18421052631579e-06,
"loss": 0.385,
"step": 82
},
{
"Batch Sum": 158.6143798828125,
"accuracy": 0.90625,
"epoch": 0.205,
"step": 82
},
{
"Batch Sum": 185.5107421875,
"accuracy": 0.796875,
"epoch": 0.205,
"step": 82
},
{
"epoch": 0.2075,
"grad_norm": 11.525442123413086,
"learning_rate": 4.171052631578948e-06,
"loss": 0.3701,
"step": 83
},
{
"Batch Sum": 162.90478515625,
"accuracy": 0.734375,
"epoch": 0.2075,
"step": 83
},
{
"Batch Sum": 163.460693359375,
"accuracy": 0.78125,
"epoch": 0.2075,
"step": 83
},
{
"epoch": 0.21,
"grad_norm": 13.735023498535156,
"learning_rate": 4.157894736842106e-06,
"loss": 0.5058,
"step": 84
},
{
"Batch Sum": 172.984375,
"accuracy": 0.71875,
"epoch": 0.21,
"step": 84
},
{
"Batch Sum": 151.54931640625,
"accuracy": 0.84375,
"epoch": 0.21,
"step": 84
},
{
"epoch": 0.2125,
"grad_norm": 12.669657707214355,
"learning_rate": 4.144736842105263e-06,
"loss": 0.4216,
"step": 85
},
{
"Batch Sum": 156.759765625,
"accuracy": 0.78125,
"epoch": 0.2125,
"step": 85
},
{
"Batch Sum": 157.17529296875,
"accuracy": 0.765625,
"epoch": 0.2125,
"step": 85
},
{
"epoch": 0.215,
"grad_norm": 12.282760620117188,
"learning_rate": 4.1315789473684216e-06,
"loss": 0.4175,
"step": 86
},
{
"Batch Sum": 153.5771484375,
"accuracy": 0.78125,
"epoch": 0.215,
"step": 86
},
{
"Batch Sum": 147.1307373046875,
"accuracy": 0.8125,
"epoch": 0.215,
"step": 86
},
{
"epoch": 0.2175,
"grad_norm": 13.767319679260254,
"learning_rate": 4.118421052631579e-06,
"loss": 0.4217,
"step": 87
},
{
"Batch Sum": 151.193115234375,
"accuracy": 0.734375,
"epoch": 0.2175,
"step": 87
},
{
"Batch Sum": 143.0897216796875,
"accuracy": 0.8125,
"epoch": 0.2175,
"step": 87
},
{
"epoch": 0.22,
"grad_norm": 12.140963554382324,
"learning_rate": 4.105263157894737e-06,
"loss": 0.4242,
"step": 88
},
{
"Batch Sum": 145.87548828125,
"accuracy": 0.765625,
"epoch": 0.22,
"step": 88
},
{
"Batch Sum": 150.4423828125,
"accuracy": 0.828125,
"epoch": 0.22,
"step": 88
},
{
"epoch": 0.2225,
"grad_norm": 10.421849250793457,
"learning_rate": 4.092105263157895e-06,
"loss": 0.4024,
"step": 89
},
{
"Batch Sum": 164.34619140625,
"accuracy": 0.84375,
"epoch": 0.2225,
"step": 89
},
{
"Batch Sum": 151.582275390625,
"accuracy": 0.84375,
"epoch": 0.2225,
"step": 89
},
{
"epoch": 0.225,
"grad_norm": 11.950529098510742,
"learning_rate": 4.078947368421053e-06,
"loss": 0.4162,
"step": 90
},
{
"Batch Sum": 177.598388671875,
"accuracy": 0.78125,
"epoch": 0.225,
"step": 90
},
{
"Batch Sum": 168.056884765625,
"accuracy": 0.75,
"epoch": 0.225,
"step": 90
},
{
"epoch": 0.2275,
"grad_norm": 13.336840629577637,
"learning_rate": 4.065789473684211e-06,
"loss": 0.4356,
"step": 91
},
{
"Batch Sum": 185.6875,
"accuracy": 0.828125,
"epoch": 0.2275,
"step": 91
},
{
"Batch Sum": 156.2733154296875,
"accuracy": 0.859375,
"epoch": 0.2275,
"step": 91
},
{
"epoch": 0.23,
"grad_norm": 10.884383201599121,
"learning_rate": 4.052631578947368e-06,
"loss": 0.3845,
"step": 92
},
{
"Batch Sum": 179.4698486328125,
"accuracy": 0.90625,
"epoch": 0.23,
"step": 92
},
{
"Batch Sum": 171.6319580078125,
"accuracy": 0.796875,
"epoch": 0.23,
"step": 92
},
{
"epoch": 0.2325,
"grad_norm": 11.024062156677246,
"learning_rate": 4.0394736842105265e-06,
"loss": 0.3715,
"step": 93
},
{
"Batch Sum": 170.8381805419922,
"accuracy": 0.75,
"epoch": 0.2325,
"step": 93
},
{
"Batch Sum": 173.384765625,
"accuracy": 0.8125,
"epoch": 0.2325,
"step": 93
},
{
"epoch": 0.235,
"grad_norm": 10.82882022857666,
"learning_rate": 4.026315789473684e-06,
"loss": 0.4289,
"step": 94
},
{
"Batch Sum": 164.244140625,
"accuracy": 0.75,
"epoch": 0.235,
"step": 94
},
{
"Batch Sum": 180.776123046875,
"accuracy": 0.796875,
"epoch": 0.235,
"step": 94
},
{
"epoch": 0.2375,
"grad_norm": 14.97029972076416,
"learning_rate": 4.013157894736842e-06,
"loss": 0.4478,
"step": 95
},
{
"Batch Sum": 174.9443359375,
"accuracy": 0.859375,
"epoch": 0.2375,
"step": 95
},
{
"Batch Sum": 168.7977294921875,
"accuracy": 0.84375,
"epoch": 0.2375,
"step": 95
},
{
"epoch": 0.24,
"grad_norm": 9.570242881774902,
"learning_rate": 4.000000000000001e-06,
"loss": 0.3363,
"step": 96
},
{
"Batch Sum": 162.29507446289062,
"accuracy": 0.75,
"epoch": 0.24,
"step": 96
},
{
"Batch Sum": 167.9951171875,
"accuracy": 0.734375,
"epoch": 0.24,
"step": 96
},
{
"epoch": 0.2425,
"grad_norm": 17.29254913330078,
"learning_rate": 3.986842105263158e-06,
"loss": 0.5527,
"step": 97
},
{
"Batch Sum": 154.8450927734375,
"accuracy": 0.859375,
"epoch": 0.2425,
"step": 97
},
{
"Batch Sum": 164.18359375,
"accuracy": 0.8125,
"epoch": 0.2425,
"step": 97
},
{
"epoch": 0.245,
"grad_norm": 10.639801025390625,
"learning_rate": 3.9736842105263165e-06,
"loss": 0.3685,
"step": 98
},
{
"Batch Sum": 161.786865234375,
"accuracy": 0.765625,
"epoch": 0.245,
"step": 98
},
{
"Batch Sum": 167.194091796875,
"accuracy": 0.796875,
"epoch": 0.245,
"step": 98
},
{
"epoch": 0.2475,
"grad_norm": 11.962891578674316,
"learning_rate": 3.960526315789474e-06,
"loss": 0.4209,
"step": 99
},
{
"Batch Sum": 156.116943359375,
"accuracy": 0.796875,
"epoch": 0.2475,
"step": 99
},
{
"Batch Sum": 158.47119140625,
"accuracy": 0.75,
"epoch": 0.2475,
"step": 99
},
{
"epoch": 0.25,
"grad_norm": 10.670126914978027,
"learning_rate": 3.947368421052632e-06,
"loss": 0.4568,
"step": 100
},
{
"Batch Sum": 151.5293731689453,
"accuracy": 0.796875,
"epoch": 0.25,
"step": 100
},
{
"Batch Sum": 139.802490234375,
"accuracy": 0.75,
"epoch": 0.25,
"step": 100
},
{
"epoch": 0.2525,
"grad_norm": 10.257286071777344,
"learning_rate": 3.93421052631579e-06,
"loss": 0.4065,
"step": 101
},
{
"Batch Sum": 145.30331420898438,
"accuracy": 0.734375,
"epoch": 0.2525,
"step": 101
},
{
"Batch Sum": 142.9066162109375,
"accuracy": 0.84375,
"epoch": 0.2525,
"step": 101
},
{
"epoch": 0.255,
"grad_norm": 8.931941032409668,
"learning_rate": 3.921052631578947e-06,
"loss": 0.3486,
"step": 102
},
{
"Batch Sum": 129.5537109375,
"accuracy": 0.765625,
"epoch": 0.255,
"step": 102
},
{
"Batch Sum": 139.361328125,
"accuracy": 0.84375,
"epoch": 0.255,
"step": 102
},
{
"epoch": 0.2575,
"grad_norm": 11.819692611694336,
"learning_rate": 3.907894736842106e-06,
"loss": 0.4768,
"step": 103
},
{
"Batch Sum": 136.63330078125,
"accuracy": 0.859375,
"epoch": 0.2575,
"step": 103
},
{
"Batch Sum": 126.9212646484375,
"accuracy": 0.859375,
"epoch": 0.2575,
"step": 103
},
{
"epoch": 0.26,
"grad_norm": 10.561305046081543,
"learning_rate": 3.894736842105263e-06,
"loss": 0.3266,
"step": 104
},
{
"Batch Sum": 119.14749145507812,
"accuracy": 0.75,
"epoch": 0.26,
"step": 104
},
{
"Batch Sum": 115.628662109375,
"accuracy": 0.8125,
"epoch": 0.26,
"step": 104
},
{
"epoch": 0.2625,
"grad_norm": 28.31745719909668,
"learning_rate": 3.8815789473684214e-06,
"loss": 0.4407,
"step": 105
},
{
"Batch Sum": 127.26849365234375,
"accuracy": 0.796875,
"epoch": 0.2625,
"step": 105
},
{
"Batch Sum": 140.0218505859375,
"accuracy": 0.828125,
"epoch": 0.2625,
"step": 105
},
{
"epoch": 0.265,
"grad_norm": 12.298383712768555,
"learning_rate": 3.868421052631579e-06,
"loss": 0.4529,
"step": 106
},
{
"Batch Sum": 136.11062622070312,
"accuracy": 0.828125,
"epoch": 0.265,
"step": 106
},
{
"Batch Sum": 139.4700927734375,
"accuracy": 0.765625,
"epoch": 0.265,
"step": 106
},
{
"epoch": 0.2675,
"grad_norm": 11.664482116699219,
"learning_rate": 3.855263157894737e-06,
"loss": 0.4132,
"step": 107
},
{
"Batch Sum": 139.60125732421875,
"accuracy": 0.734375,
"epoch": 0.2675,
"step": 107
},
{
"Batch Sum": 137.16650390625,
"accuracy": 0.796875,
"epoch": 0.2675,
"step": 107
},
{
"epoch": 0.27,
"grad_norm": 10.46422290802002,
"learning_rate": 3.842105263157895e-06,
"loss": 0.3772,
"step": 108
},
{
"Batch Sum": 130.9759521484375,
"accuracy": 0.71875,
"epoch": 0.27,
"step": 108
},
{
"Batch Sum": 132.84173583984375,
"accuracy": 0.78125,
"epoch": 0.27,
"step": 108
},
{
"epoch": 0.2725,
"grad_norm": 11.786149024963379,
"learning_rate": 3.828947368421053e-06,
"loss": 0.4741,
"step": 109
},
{
"Batch Sum": 131.52923583984375,
"accuracy": 0.703125,
"epoch": 0.2725,
"step": 109
},
{
"Batch Sum": 133.90191650390625,
"accuracy": 0.78125,
"epoch": 0.2725,
"step": 109
},
{
"epoch": 0.275,
"grad_norm": 13.17595100402832,
"learning_rate": 3.815789473684211e-06,
"loss": 0.521,
"step": 110
},
{
"Batch Sum": 108.73193359375,
"accuracy": 0.828125,
"epoch": 0.275,
"step": 110
},
{
"Batch Sum": 112.35197448730469,
"accuracy": 0.78125,
"epoch": 0.275,
"step": 110
},
{
"epoch": 0.2775,
"grad_norm": 10.771636009216309,
"learning_rate": 3.802631578947369e-06,
"loss": 0.3971,
"step": 111
},
{
"Batch Sum": 121.26911926269531,
"accuracy": 0.828125,
"epoch": 0.2775,
"step": 111
},
{
"Batch Sum": 126.85107421875,
"accuracy": 0.90625,
"epoch": 0.2775,
"step": 111
},
{
"epoch": 0.28,
"grad_norm": 9.355616569519043,
"learning_rate": 3.789473684210527e-06,
"loss": 0.3219,
"step": 112
},
{
"Batch Sum": 112.09989929199219,
"accuracy": 0.796875,
"epoch": 0.28,
"step": 112
},
{
"Batch Sum": 118.8616943359375,
"accuracy": 0.75,
"epoch": 0.28,
"step": 112
},
{
"epoch": 0.2825,
"grad_norm": 9.97263240814209,
"learning_rate": 3.7763157894736847e-06,
"loss": 0.4534,
"step": 113
},
{
"Batch Sum": 112.02926635742188,
"accuracy": 0.84375,
"epoch": 0.2825,
"step": 113
},
{
"Batch Sum": 128.11767578125,
"accuracy": 0.90625,
"epoch": 0.2825,
"step": 113
},
{
"epoch": 0.285,
"grad_norm": 8.701355934143066,
"learning_rate": 3.7631578947368426e-06,
"loss": 0.3521,
"step": 114
},
{
"Batch Sum": 114.733154296875,
"accuracy": 0.828125,
"epoch": 0.285,
"step": 114
},
{
"Batch Sum": 120.71221923828125,
"accuracy": 0.875,
"epoch": 0.285,
"step": 114
},
{
"epoch": 0.2875,
"grad_norm": 10.165462493896484,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.3602,
"step": 115
},
{
"Batch Sum": 130.4957275390625,
"accuracy": 0.875,
"epoch": 0.2875,
"step": 115
},
{
"Batch Sum": 129.851318359375,
"accuracy": 0.828125,
"epoch": 0.2875,
"step": 115
},
{
"epoch": 0.29,
"grad_norm": 8.85599422454834,
"learning_rate": 3.736842105263158e-06,
"loss": 0.3406,
"step": 116
},
{
"Batch Sum": 153.54833984375,
"accuracy": 0.828125,
"epoch": 0.29,
"step": 116
},
{
"Batch Sum": 117.29022216796875,
"accuracy": 0.734375,
"epoch": 0.29,
"step": 116
},
{
"epoch": 0.2925,
"grad_norm": 11.120735168457031,
"learning_rate": 3.723684210526316e-06,
"loss": 0.4414,
"step": 117
},
{
"Batch Sum": 133.2943115234375,
"accuracy": 0.8125,
"epoch": 0.2925,
"step": 117
},
{
"Batch Sum": 154.751708984375,
"accuracy": 0.765625,
"epoch": 0.2925,
"step": 117
},
{
"epoch": 0.295,
"grad_norm": 9.896623611450195,
"learning_rate": 3.710526315789474e-06,
"loss": 0.4025,
"step": 118
},
{
"Batch Sum": 146.9268798828125,
"accuracy": 0.796875,
"epoch": 0.295,
"step": 118
},
{
"Batch Sum": 157.404541015625,
"accuracy": 0.84375,
"epoch": 0.295,
"step": 118
},
{
"epoch": 0.2975,
"grad_norm": 12.310927391052246,
"learning_rate": 3.6973684210526317e-06,
"loss": 0.3888,
"step": 119
},
{
"Batch Sum": 177.720458984375,
"accuracy": 0.734375,
"epoch": 0.2975,
"step": 119
},
{
"Batch Sum": 154.5662841796875,
"accuracy": 0.828125,
"epoch": 0.2975,
"step": 119
},
{
"epoch": 0.3,
"grad_norm": 13.395303726196289,
"learning_rate": 3.6842105263157896e-06,
"loss": 0.4263,
"step": 120
},
{
"Batch Sum": 165.2161865234375,
"accuracy": 0.75,
"epoch": 0.3,
"step": 120
},
{
"Batch Sum": 154.25457763671875,
"accuracy": 0.765625,
"epoch": 0.3,
"step": 120
},
{
"epoch": 0.3025,
"grad_norm": 18.222015380859375,
"learning_rate": 3.6710526315789476e-06,
"loss": 0.5583,
"step": 121
},
{
"Batch Sum": 156.56472778320312,
"accuracy": 0.734375,
"epoch": 0.3025,
"step": 121
},
{
"Batch Sum": 154.8548583984375,
"accuracy": 0.8125,
"epoch": 0.3025,
"step": 121
},
{
"epoch": 0.305,
"grad_norm": 12.073637008666992,
"learning_rate": 3.657894736842106e-06,
"loss": 0.4433,
"step": 122
},
{
"Batch Sum": 191.345703125,
"accuracy": 0.8125,
"epoch": 0.305,
"step": 122
},
{
"Batch Sum": 194.02685546875,
"accuracy": 0.859375,
"epoch": 0.305,
"step": 122
},
{
"epoch": 0.3075,
"grad_norm": 11.347955703735352,
"learning_rate": 3.644736842105264e-06,
"loss": 0.3678,
"step": 123
},
{
"Batch Sum": 154.679443359375,
"accuracy": 0.859375,
"epoch": 0.3075,
"step": 123
},
{
"Batch Sum": 151.9825439453125,
"accuracy": 0.890625,
"epoch": 0.3075,
"step": 123
},
{
"epoch": 0.31,
"grad_norm": 9.588033676147461,
"learning_rate": 3.6315789473684217e-06,
"loss": 0.2785,
"step": 124
},
{
"Batch Sum": 163.2354278564453,
"accuracy": 0.796875,
"epoch": 0.31,
"step": 124
},
{
"Batch Sum": 176.15380859375,
"accuracy": 0.765625,
"epoch": 0.31,
"step": 124
},
{
"epoch": 0.3125,
"grad_norm": 13.149937629699707,
"learning_rate": 3.618421052631579e-06,
"loss": 0.4441,
"step": 125
},
{
"Batch Sum": 168.91845703125,
"accuracy": 0.765625,
"epoch": 0.3125,
"step": 125
},
{
"Batch Sum": 160.57781982421875,
"accuracy": 0.875,
"epoch": 0.3125,
"step": 125
},
{
"epoch": 0.315,
"grad_norm": 11.270033836364746,
"learning_rate": 3.605263157894737e-06,
"loss": 0.3817,
"step": 126
},
{
"Batch Sum": 154.63726806640625,
"accuracy": 0.796875,
"epoch": 0.315,
"step": 126
},
{
"Batch Sum": 161.947265625,
"accuracy": 0.8125,
"epoch": 0.315,
"step": 126
},
{
"epoch": 0.3175,
"grad_norm": 11.978543281555176,
"learning_rate": 3.592105263157895e-06,
"loss": 0.4194,
"step": 127
},
{
"Batch Sum": 158.22998046875,
"accuracy": 0.8125,
"epoch": 0.3175,
"step": 127
},
{
"Batch Sum": 155.70001220703125,
"accuracy": 0.703125,
"epoch": 0.3175,
"step": 127
},
{
"epoch": 0.32,
"grad_norm": 10.93179702758789,
"learning_rate": 3.578947368421053e-06,
"loss": 0.3997,
"step": 128
},
{
"Batch Sum": 156.26416015625,
"accuracy": 0.796875,
"epoch": 0.32,
"step": 128
},
{
"Batch Sum": 147.02413940429688,
"accuracy": 0.71875,
"epoch": 0.32,
"step": 128
},
{
"epoch": 0.3225,
"grad_norm": 11.40749740600586,
"learning_rate": 3.565789473684211e-06,
"loss": 0.4216,
"step": 129
},
{
"Batch Sum": 152.162109375,
"accuracy": 0.828125,
"epoch": 0.3225,
"step": 129
},
{
"Batch Sum": 156.4219970703125,
"accuracy": 0.828125,
"epoch": 0.3225,
"step": 129
},
{
"epoch": 0.325,
"grad_norm": 10.318909645080566,
"learning_rate": 3.5526315789473687e-06,
"loss": 0.3745,
"step": 130
},
{
"Batch Sum": 157.092529296875,
"accuracy": 0.75,
"epoch": 0.325,
"step": 130
},
{
"Batch Sum": 152.626953125,
"accuracy": 0.6875,
"epoch": 0.325,
"step": 130
},
{
"epoch": 0.3275,
"grad_norm": 14.140223503112793,
"learning_rate": 3.5394736842105266e-06,
"loss": 0.5467,
"step": 131
},
{
"Batch Sum": 148.7054443359375,
"accuracy": 0.78125,
"epoch": 0.3275,
"step": 131
},
{
"Batch Sum": 156.0888671875,
"accuracy": 0.796875,
"epoch": 0.3275,
"step": 131
},
{
"epoch": 0.33,
"grad_norm": 10.437580108642578,
"learning_rate": 3.5263157894736846e-06,
"loss": 0.426,
"step": 132
},
{
"Batch Sum": 143.746337890625,
"accuracy": 0.875,
"epoch": 0.33,
"step": 132
},
{
"Batch Sum": 159.1666259765625,
"accuracy": 0.890625,
"epoch": 0.33,
"step": 132
},
{
"epoch": 0.3325,
"grad_norm": 9.603164672851562,
"learning_rate": 3.513157894736842e-06,
"loss": 0.3198,
"step": 133
},
{
"Batch Sum": 142.6644287109375,
"accuracy": 0.765625,
"epoch": 0.3325,
"step": 133
},
{
"Batch Sum": 138.824951171875,
"accuracy": 0.734375,
"epoch": 0.3325,
"step": 133
},
{
"epoch": 0.335,
"grad_norm": 10.18635368347168,
"learning_rate": 3.5e-06,
"loss": 0.4675,
"step": 134
},
{
"Batch Sum": 145.2435302734375,
"accuracy": 0.828125,
"epoch": 0.335,
"step": 134
},
{
"Batch Sum": 133.55718994140625,
"accuracy": 0.84375,
"epoch": 0.335,
"step": 134
},
{
"epoch": 0.3375,
"grad_norm": 8.98958683013916,
"learning_rate": 3.486842105263158e-06,
"loss": 0.394,
"step": 135
},
{
"Batch Sum": 144.40631103515625,
"accuracy": 0.828125,
"epoch": 0.3375,
"step": 135
},
{
"Batch Sum": 155.13470458984375,
"accuracy": 0.828125,
"epoch": 0.3375,
"step": 135
},
{
"epoch": 0.34,
"grad_norm": 10.116023063659668,
"learning_rate": 3.473684210526316e-06,
"loss": 0.3919,
"step": 136
},
{
"Batch Sum": 157.0986328125,
"accuracy": 0.828125,
"epoch": 0.34,
"step": 136
},
{
"Batch Sum": 151.594970703125,
"accuracy": 0.84375,
"epoch": 0.34,
"step": 136
},
{
"epoch": 0.3425,
"grad_norm": 9.4917573928833,
"learning_rate": 3.460526315789474e-06,
"loss": 0.3728,
"step": 137
},
{
"Batch Sum": 144.12567138671875,
"accuracy": 0.828125,
"epoch": 0.3425,
"step": 137
},
{
"Batch Sum": 158.22021484375,
"accuracy": 0.828125,
"epoch": 0.3425,
"step": 137
},
{
"epoch": 0.345,
"grad_norm": 9.91769027709961,
"learning_rate": 3.447368421052632e-06,
"loss": 0.3612,
"step": 138
},
{
"Batch Sum": 163.3560791015625,
"accuracy": 0.765625,
"epoch": 0.345,
"step": 138
},
{
"Batch Sum": 165.13037109375,
"accuracy": 0.8125,
"epoch": 0.345,
"step": 138
},
{
"epoch": 0.3475,
"grad_norm": 11.594426155090332,
"learning_rate": 3.43421052631579e-06,
"loss": 0.4229,
"step": 139
},
{
"Batch Sum": 176.0669708251953,
"accuracy": 0.75,
"epoch": 0.3475,
"step": 139
},
{
"Batch Sum": 170.3831787109375,
"accuracy": 0.8125,
"epoch": 0.3475,
"step": 139
},
{
"epoch": 0.35,
"grad_norm": 12.000874519348145,
"learning_rate": 3.421052631578948e-06,
"loss": 0.448,
"step": 140
},
{
"Batch Sum": 173.9276123046875,
"accuracy": 0.8125,
"epoch": 0.35,
"step": 140
},
{
"Batch Sum": 171.8037109375,
"accuracy": 0.78125,
"epoch": 0.35,
"step": 140
},
{
"epoch": 0.3525,
"grad_norm": 13.811454772949219,
"learning_rate": 3.4078947368421057e-06,
"loss": 0.4739,
"step": 141
},
{
"Batch Sum": 167.4185791015625,
"accuracy": 0.796875,
"epoch": 0.3525,
"step": 141
},
{
"Batch Sum": 169.88824462890625,
"accuracy": 0.734375,
"epoch": 0.3525,
"step": 141
},
{
"epoch": 0.355,
"grad_norm": 12.386519432067871,
"learning_rate": 3.3947368421052636e-06,
"loss": 0.4468,
"step": 142
},
{
"Batch Sum": 172.253173828125,
"accuracy": 0.875,
"epoch": 0.355,
"step": 142
},
{
"Batch Sum": 179.5269775390625,
"accuracy": 0.765625,
"epoch": 0.355,
"step": 142
},
{
"epoch": 0.3575,
"grad_norm": 10.997251510620117,
"learning_rate": 3.381578947368421e-06,
"loss": 0.3703,
"step": 143
},
{
"Batch Sum": 177.33450317382812,
"accuracy": 0.859375,
"epoch": 0.3575,
"step": 143
},
{
"Batch Sum": 168.30322265625,
"accuracy": 0.796875,
"epoch": 0.3575,
"step": 143
},
{
"epoch": 0.36,
"grad_norm": 10.775482177734375,
"learning_rate": 3.368421052631579e-06,
"loss": 0.3419,
"step": 144
},
{
"Batch Sum": 177.181396484375,
"accuracy": 0.78125,
"epoch": 0.36,
"step": 144
},
{
"Batch Sum": 179.53369140625,
"accuracy": 0.8125,
"epoch": 0.36,
"step": 144
},
{
"epoch": 0.3625,
"grad_norm": 11.77710247039795,
"learning_rate": 3.355263157894737e-06,
"loss": 0.442,
"step": 145
},
{
"Batch Sum": 163.1304931640625,
"accuracy": 0.84375,
"epoch": 0.3625,
"step": 145
},
{
"Batch Sum": 173.8359375,
"accuracy": 0.78125,
"epoch": 0.3625,
"step": 145
},
{
"epoch": 0.365,
"grad_norm": 10.837456703186035,
"learning_rate": 3.342105263157895e-06,
"loss": 0.3772,
"step": 146
},
{
"Batch Sum": 168.785888671875,
"accuracy": 0.765625,
"epoch": 0.365,
"step": 146
},
{
"Batch Sum": 162.473388671875,
"accuracy": 0.796875,
"epoch": 0.365,
"step": 146
},
{
"epoch": 0.3675,
"grad_norm": 12.898738861083984,
"learning_rate": 3.3289473684210528e-06,
"loss": 0.4284,
"step": 147
},
{
"Batch Sum": 155.8890380859375,
"accuracy": 0.8125,
"epoch": 0.3675,
"step": 147
},
{
"Batch Sum": 163.2218017578125,
"accuracy": 0.84375,
"epoch": 0.3675,
"step": 147
},
{
"epoch": 0.37,
"grad_norm": 10.30620288848877,
"learning_rate": 3.3157894736842107e-06,
"loss": 0.3338,
"step": 148
},
{
"Batch Sum": 179.05615234375,
"accuracy": 0.84375,
"epoch": 0.37,
"step": 148
},
{
"Batch Sum": 165.72119140625,
"accuracy": 0.8125,
"epoch": 0.37,
"step": 148
},
{
"epoch": 0.3725,
"grad_norm": 10.406338691711426,
"learning_rate": 3.302631578947369e-06,
"loss": 0.3486,
"step": 149
},
{
"Batch Sum": 153.40414428710938,
"accuracy": 0.859375,
"epoch": 0.3725,
"step": 149
},
{
"Batch Sum": 157.75411987304688,
"accuracy": 0.796875,
"epoch": 0.3725,
"step": 149
},
{
"epoch": 0.375,
"grad_norm": 11.733957290649414,
"learning_rate": 3.289473684210527e-06,
"loss": 0.36,
"step": 150
},
{
"Batch Sum": 179.1650390625,
"accuracy": 0.875,
"epoch": 0.375,
"step": 150
},
{
"Batch Sum": 178.9234619140625,
"accuracy": 0.875,
"epoch": 0.375,
"step": 150
},
{
"epoch": 0.3775,
"grad_norm": 9.269570350646973,
"learning_rate": 3.276315789473685e-06,
"loss": 0.2708,
"step": 151
},
{
"Batch Sum": 179.40478515625,
"accuracy": 0.84375,
"epoch": 0.3775,
"step": 151
},
{
"Batch Sum": 162.3435516357422,
"accuracy": 0.8125,
"epoch": 0.3775,
"step": 151
},
{
"epoch": 0.38,
"grad_norm": 12.036487579345703,
"learning_rate": 3.2631578947368423e-06,
"loss": 0.3789,
"step": 152
},
{
"Batch Sum": 154.46905517578125,
"accuracy": 0.84375,
"epoch": 0.38,
"step": 152
},
{
"Batch Sum": 176.2408447265625,
"accuracy": 0.8125,
"epoch": 0.38,
"step": 152
},
{
"epoch": 0.3825,
"grad_norm": 10.829084396362305,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.3816,
"step": 153
},
{
"Batch Sum": 171.27947998046875,
"accuracy": 0.75,
"epoch": 0.3825,
"step": 153
},
{
"Batch Sum": 181.0279541015625,
"accuracy": 0.859375,
"epoch": 0.3825,
"step": 153
},
{
"epoch": 0.385,
"grad_norm": 13.027417182922363,
"learning_rate": 3.236842105263158e-06,
"loss": 0.4191,
"step": 154
},
{
"Batch Sum": 173.9217529296875,
"accuracy": 0.8125,
"epoch": 0.385,
"step": 154
},
{
"Batch Sum": 170.60922241210938,
"accuracy": 0.765625,
"epoch": 0.385,
"step": 154
},
{
"epoch": 0.3875,
"grad_norm": 15.362863540649414,
"learning_rate": 3.223684210526316e-06,
"loss": 0.4455,
"step": 155
},
{
"Batch Sum": 185.8359375,
"accuracy": 0.875,
"epoch": 0.3875,
"step": 155
},
{
"Batch Sum": 175.69873046875,
"accuracy": 0.859375,
"epoch": 0.3875,
"step": 155
},
{
"epoch": 0.39,
"grad_norm": 11.39478874206543,
"learning_rate": 3.210526315789474e-06,
"loss": 0.3245,
"step": 156
},
{
"Batch Sum": 187.887451171875,
"accuracy": 0.796875,
"epoch": 0.39,
"step": 156
},
{
"Batch Sum": 176.8115234375,
"accuracy": 0.8125,
"epoch": 0.39,
"step": 156
},
{
"epoch": 0.3925,
"grad_norm": 12.699715614318848,
"learning_rate": 3.197368421052632e-06,
"loss": 0.3727,
"step": 157
},
{
"Batch Sum": 174.041015625,
"accuracy": 0.875,
"epoch": 0.3925,
"step": 157
},
{
"Batch Sum": 175.4111328125,
"accuracy": 0.734375,
"epoch": 0.3925,
"step": 157
},
{
"epoch": 0.395,
"grad_norm": 13.248059272766113,
"learning_rate": 3.1842105263157898e-06,
"loss": 0.434,
"step": 158
},
{
"Batch Sum": 163.93408203125,
"accuracy": 0.796875,
"epoch": 0.395,
"step": 158
},
{
"Batch Sum": 172.42626953125,
"accuracy": 0.84375,
"epoch": 0.395,
"step": 158
},
{
"epoch": 0.3975,
"grad_norm": 12.247123718261719,
"learning_rate": 3.1710526315789477e-06,
"loss": 0.3552,
"step": 159
},
{
"Batch Sum": 159.5538330078125,
"accuracy": 0.796875,
"epoch": 0.3975,
"step": 159
},
{
"Batch Sum": 172.398193359375,
"accuracy": 0.84375,
"epoch": 0.3975,
"step": 159
},
{
"epoch": 0.4,
"grad_norm": 13.754870414733887,
"learning_rate": 3.157894736842105e-06,
"loss": 0.4638,
"step": 160
},
{
"Batch Sum": 179.1923828125,
"accuracy": 0.859375,
"epoch": 0.4,
"step": 160
},
{
"Batch Sum": 161.6951904296875,
"accuracy": 0.8125,
"epoch": 0.4,
"step": 160
},
{
"epoch": 0.4025,
"grad_norm": 9.910033226013184,
"learning_rate": 3.144736842105263e-06,
"loss": 0.3066,
"step": 161
},
{
"Batch Sum": 169.45782470703125,
"accuracy": 0.859375,
"epoch": 0.4025,
"step": 161
},
{
"Batch Sum": 157.14697265625,
"accuracy": 0.890625,
"epoch": 0.4025,
"step": 161
},
{
"epoch": 0.405,
"grad_norm": 9.645856857299805,
"learning_rate": 3.131578947368421e-06,
"loss": 0.2836,
"step": 162
},
{
"Batch Sum": 153.429443359375,
"accuracy": 0.75,
"epoch": 0.405,
"step": 162
},
{
"Batch Sum": 159.56504821777344,
"accuracy": 0.859375,
"epoch": 0.405,
"step": 162
},
{
"epoch": 0.4075,
"grad_norm": 11.657127380371094,
"learning_rate": 3.1184210526315793e-06,
"loss": 0.4141,
"step": 163
},
{
"Batch Sum": 157.7459716796875,
"accuracy": 0.734375,
"epoch": 0.4075,
"step": 163
},
{
"Batch Sum": 163.69723510742188,
"accuracy": 0.796875,
"epoch": 0.4075,
"step": 163
},
{
"epoch": 0.41,
"grad_norm": 13.670110702514648,
"learning_rate": 3.1052631578947372e-06,
"loss": 0.4772,
"step": 164
},
{
"Batch Sum": 157.6031494140625,
"accuracy": 0.890625,
"epoch": 0.41,
"step": 164
},
{
"Batch Sum": 172.3399658203125,
"accuracy": 0.828125,
"epoch": 0.41,
"step": 164
},
{
"epoch": 0.4125,
"grad_norm": 9.068910598754883,
"learning_rate": 3.092105263157895e-06,
"loss": 0.2752,
"step": 165
},
{
"Batch Sum": 160.44140625,
"accuracy": 0.859375,
"epoch": 0.4125,
"step": 165
},
{
"Batch Sum": 169.0107421875,
"accuracy": 0.71875,
"epoch": 0.4125,
"step": 165
},
{
"epoch": 0.415,
"grad_norm": 12.532942771911621,
"learning_rate": 3.078947368421053e-06,
"loss": 0.4162,
"step": 166
},
{
"Batch Sum": 150.088623046875,
"accuracy": 0.90625,
"epoch": 0.415,
"step": 166
},
{
"Batch Sum": 163.49560546875,
"accuracy": 0.765625,
"epoch": 0.415,
"step": 166
},
{
"epoch": 0.4175,
"grad_norm": 11.255102157592773,
"learning_rate": 3.065789473684211e-06,
"loss": 0.3426,
"step": 167
},
{
"Batch Sum": 158.72216796875,
"accuracy": 0.78125,
"epoch": 0.4175,
"step": 167
},
{
"Batch Sum": 157.4320068359375,
"accuracy": 0.75,
"epoch": 0.4175,
"step": 167
},
{
"epoch": 0.42,
"grad_norm": 11.509016036987305,
"learning_rate": 3.052631578947369e-06,
"loss": 0.3974,
"step": 168
},
{
"Batch Sum": 170.9871826171875,
"accuracy": 0.859375,
"epoch": 0.42,
"step": 168
},
{
"Batch Sum": 151.966796875,
"accuracy": 0.859375,
"epoch": 0.42,
"step": 168
},
{
"epoch": 0.4225,
"grad_norm": 11.246017456054688,
"learning_rate": 3.0394736842105268e-06,
"loss": 0.346,
"step": 169
},
{
"Batch Sum": 141.69485473632812,
"accuracy": 0.8125,
"epoch": 0.4225,
"step": 169
},
{
"Batch Sum": 177.1005859375,
"accuracy": 0.84375,
"epoch": 0.4225,
"step": 169
},
{
"epoch": 0.425,
"grad_norm": 12.605325698852539,
"learning_rate": 3.0263157894736843e-06,
"loss": 0.3574,
"step": 170
},
{
"Batch Sum": 158.830078125,
"accuracy": 0.796875,
"epoch": 0.425,
"step": 170
},
{
"Batch Sum": 171.9595947265625,
"accuracy": 0.78125,
"epoch": 0.425,
"step": 170
},
{
"epoch": 0.4275,
"grad_norm": 11.472408294677734,
"learning_rate": 3.013157894736842e-06,
"loss": 0.3778,
"step": 171
},
{
"Batch Sum": 164.240966796875,
"accuracy": 0.75,
"epoch": 0.4275,
"step": 171
},
{
"Batch Sum": 187.87646484375,
"accuracy": 0.84375,
"epoch": 0.4275,
"step": 171
},
{
"epoch": 0.43,
"grad_norm": 12.420621871948242,
"learning_rate": 3e-06,
"loss": 0.3898,
"step": 172
},
{
"Batch Sum": 168.580810546875,
"accuracy": 0.796875,
"epoch": 0.43,
"step": 172
},
{
"Batch Sum": 183.23199462890625,
"accuracy": 0.84375,
"epoch": 0.43,
"step": 172
},
{
"epoch": 0.4325,
"grad_norm": 11.917014122009277,
"learning_rate": 2.986842105263158e-06,
"loss": 0.3652,
"step": 173
},
{
"Batch Sum": 180.752685546875,
"accuracy": 0.8125,
"epoch": 0.4325,
"step": 173
},
{
"Batch Sum": 174.8634033203125,
"accuracy": 0.78125,
"epoch": 0.4325,
"step": 173
},
{
"epoch": 0.435,
"grad_norm": 13.711872100830078,
"learning_rate": 2.973684210526316e-06,
"loss": 0.4049,
"step": 174
},
{
"Batch Sum": 187.95513916015625,
"accuracy": 0.859375,
"epoch": 0.435,
"step": 174
},
{
"Batch Sum": 175.5262451171875,
"accuracy": 0.90625,
"epoch": 0.435,
"step": 174
},
{
"epoch": 0.4375,
"grad_norm": 13.002115249633789,
"learning_rate": 2.960526315789474e-06,
"loss": 0.3187,
"step": 175
},
{
"Batch Sum": 193.835693359375,
"accuracy": 0.890625,
"epoch": 0.4375,
"step": 175
},
{
"Batch Sum": 178.1373291015625,
"accuracy": 0.875,
"epoch": 0.4375,
"step": 175
},
{
"epoch": 0.44,
"grad_norm": 10.474250793457031,
"learning_rate": 2.9473684210526317e-06,
"loss": 0.2761,
"step": 176
},
{
"Batch Sum": 204.98779296875,
"accuracy": 0.78125,
"epoch": 0.44,
"step": 176
},
{
"Batch Sum": 176.36172485351562,
"accuracy": 0.84375,
"epoch": 0.44,
"step": 176
},
{
"epoch": 0.4425,
"grad_norm": 13.780770301818848,
"learning_rate": 2.93421052631579e-06,
"loss": 0.4051,
"step": 177
},
{
"Batch Sum": 182.67578125,
"accuracy": 0.890625,
"epoch": 0.4425,
"step": 177
},
{
"Batch Sum": 186.214111328125,
"accuracy": 0.859375,
"epoch": 0.4425,
"step": 177
},
{
"epoch": 0.445,
"grad_norm": 11.39830207824707,
"learning_rate": 2.921052631578948e-06,
"loss": 0.3473,
"step": 178
},
{
"Batch Sum": 177.402587890625,
"accuracy": 0.84375,
"epoch": 0.445,
"step": 178
},
{
"Batch Sum": 189.29144287109375,
"accuracy": 0.828125,
"epoch": 0.445,
"step": 178
},
{
"epoch": 0.4475,
"grad_norm": 13.348224639892578,
"learning_rate": 2.907894736842106e-06,
"loss": 0.3464,
"step": 179
},
{
"Batch Sum": 184.62939453125,
"accuracy": 0.859375,
"epoch": 0.4475,
"step": 179
},
{
"Batch Sum": 171.792236328125,
"accuracy": 0.78125,
"epoch": 0.4475,
"step": 179
},
{
"epoch": 0.45,
"grad_norm": 13.45236587524414,
"learning_rate": 2.8947368421052634e-06,
"loss": 0.3707,
"step": 180
},
{
"Batch Sum": 183.537353515625,
"accuracy": 0.828125,
"epoch": 0.45,
"step": 180
},
{
"Batch Sum": 184.44140625,
"accuracy": 0.875,
"epoch": 0.45,
"step": 180
},
{
"epoch": 0.4525,
"grad_norm": 11.033170700073242,
"learning_rate": 2.8815789473684213e-06,
"loss": 0.3141,
"step": 181
},
{
"Batch Sum": 157.82540893554688,
"accuracy": 0.796875,
"epoch": 0.4525,
"step": 181
},
{
"Batch Sum": 178.438720703125,
"accuracy": 0.796875,
"epoch": 0.4525,
"step": 181
},
{
"epoch": 0.455,
"grad_norm": 13.729015350341797,
"learning_rate": 2.868421052631579e-06,
"loss": 0.4089,
"step": 182
},
{
"Batch Sum": 178.9556884765625,
"accuracy": 0.734375,
"epoch": 0.455,
"step": 182
},
{
"Batch Sum": 181.88690185546875,
"accuracy": 0.84375,
"epoch": 0.455,
"step": 182
},
{
"epoch": 0.4575,
"grad_norm": 16.668292999267578,
"learning_rate": 2.855263157894737e-06,
"loss": 0.5041,
"step": 183
},
{
"Batch Sum": 170.711181640625,
"accuracy": 0.796875,
"epoch": 0.4575,
"step": 183
},
{
"Batch Sum": 169.305908203125,
"accuracy": 0.796875,
"epoch": 0.4575,
"step": 183
},
{
"epoch": 0.46,
"grad_norm": 11.935322761535645,
"learning_rate": 2.842105263157895e-06,
"loss": 0.4036,
"step": 184
},
{
"Batch Sum": 159.673828125,
"accuracy": 0.796875,
"epoch": 0.46,
"step": 184
},
{
"Batch Sum": 148.835693359375,
"accuracy": 0.828125,
"epoch": 0.46,
"step": 184
},
{
"epoch": 0.4625,
"grad_norm": 12.534523010253906,
"learning_rate": 2.828947368421053e-06,
"loss": 0.3995,
"step": 185
},
{
"Batch Sum": 137.3509521484375,
"accuracy": 0.859375,
"epoch": 0.4625,
"step": 185
},
{
"Batch Sum": 137.57501220703125,
"accuracy": 0.875,
"epoch": 0.4625,
"step": 185
},
{
"epoch": 0.465,
"grad_norm": 10.489812850952148,
"learning_rate": 2.815789473684211e-06,
"loss": 0.3355,
"step": 186
},
{
"Batch Sum": 149.32647705078125,
"accuracy": 0.84375,
"epoch": 0.465,
"step": 186
},
{
"Batch Sum": 137.6878662109375,
"accuracy": 0.890625,
"epoch": 0.465,
"step": 186
},
{
"epoch": 0.4675,
"grad_norm": 10.799548149108887,
"learning_rate": 2.8026315789473683e-06,
"loss": 0.3154,
"step": 187
},
{
"Batch Sum": 140.84820556640625,
"accuracy": 0.84375,
"epoch": 0.4675,
"step": 187
},
{
"Batch Sum": 137.59814453125,
"accuracy": 0.796875,
"epoch": 0.4675,
"step": 187
},
{
"epoch": 0.47,
"grad_norm": 10.771398544311523,
"learning_rate": 2.789473684210526e-06,
"loss": 0.3794,
"step": 188
},
{
"Batch Sum": 120.0439453125,
"accuracy": 0.828125,
"epoch": 0.47,
"step": 188
},
{
"Batch Sum": 132.53411865234375,
"accuracy": 0.890625,
"epoch": 0.47,
"step": 188
},
{
"epoch": 0.4725,
"grad_norm": 8.793214797973633,
"learning_rate": 2.776315789473684e-06,
"loss": 0.3329,
"step": 189
},
{
"Batch Sum": 148.12432861328125,
"accuracy": 0.796875,
"epoch": 0.4725,
"step": 189
},
{
"Batch Sum": 123.746826171875,
"accuracy": 0.765625,
"epoch": 0.4725,
"step": 189
},
{
"epoch": 0.475,
"grad_norm": 10.795166969299316,
"learning_rate": 2.7631578947368424e-06,
"loss": 0.3926,
"step": 190
},
{
"Batch Sum": 144.51519775390625,
"accuracy": 0.796875,
"epoch": 0.475,
"step": 190
},
{
"Batch Sum": 156.36366271972656,
"accuracy": 0.84375,
"epoch": 0.475,
"step": 190
},
{
"epoch": 0.4775,
"grad_norm": 11.242524147033691,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.4088,
"step": 191
},
{
"Batch Sum": 132.588134765625,
"accuracy": 0.734375,
"epoch": 0.4775,
"step": 191
},
{
"Batch Sum": 132.279296875,
"accuracy": 0.765625,
"epoch": 0.4775,
"step": 191
},
{
"epoch": 0.48,
"grad_norm": 11.10943603515625,
"learning_rate": 2.7368421052631583e-06,
"loss": 0.4502,
"step": 192
},
{
"Batch Sum": 127.26214599609375,
"accuracy": 0.765625,
"epoch": 0.48,
"step": 192
},
{
"Batch Sum": 120.28961181640625,
"accuracy": 0.734375,
"epoch": 0.48,
"step": 192
},
{
"epoch": 0.4825,
"grad_norm": 10.466278076171875,
"learning_rate": 2.723684210526316e-06,
"loss": 0.4388,
"step": 193
},
{
"Batch Sum": 137.3543701171875,
"accuracy": 0.875,
"epoch": 0.4825,
"step": 193
},
{
"Batch Sum": 126.865234375,
"accuracy": 0.875,
"epoch": 0.4825,
"step": 193
},
{
"epoch": 0.485,
"grad_norm": 9.617724418640137,
"learning_rate": 2.710526315789474e-06,
"loss": 0.3097,
"step": 194
},
{
"Batch Sum": 141.0351104736328,
"accuracy": 0.859375,
"epoch": 0.485,
"step": 194
},
{
"Batch Sum": 115.21342468261719,
"accuracy": 0.765625,
"epoch": 0.485,
"step": 194
},
{
"epoch": 0.4875,
"grad_norm": 9.829240798950195,
"learning_rate": 2.697368421052632e-06,
"loss": 0.4003,
"step": 195
},
{
"Batch Sum": 140.189697265625,
"accuracy": 0.875,
"epoch": 0.4875,
"step": 195
},
{
"Batch Sum": 147.9466552734375,
"accuracy": 0.84375,
"epoch": 0.4875,
"step": 195
},
{
"epoch": 0.49,
"grad_norm": 9.37118911743164,
"learning_rate": 2.68421052631579e-06,
"loss": 0.3677,
"step": 196
},
{
"Batch Sum": 148.1060791015625,
"accuracy": 0.71875,
"epoch": 0.49,
"step": 196
},
{
"Batch Sum": 131.451171875,
"accuracy": 0.84375,
"epoch": 0.49,
"step": 196
},
{
"epoch": 0.4925,
"grad_norm": 11.06047534942627,
"learning_rate": 2.6710526315789474e-06,
"loss": 0.4126,
"step": 197
},
{
"Batch Sum": 144.2012939453125,
"accuracy": 0.828125,
"epoch": 0.4925,
"step": 197
},
{
"Batch Sum": 141.30419921875,
"accuracy": 0.8125,
"epoch": 0.4925,
"step": 197
},
{
"epoch": 0.495,
"grad_norm": 9.70101261138916,
"learning_rate": 2.6578947368421053e-06,
"loss": 0.3397,
"step": 198
},
{
"Batch Sum": 149.09429931640625,
"accuracy": 0.828125,
"epoch": 0.495,
"step": 198
},
{
"Batch Sum": 141.2196044921875,
"accuracy": 0.828125,
"epoch": 0.495,
"step": 198
},
{
"epoch": 0.4975,
"grad_norm": 9.994836807250977,
"learning_rate": 2.644736842105263e-06,
"loss": 0.3922,
"step": 199
},
{
"Batch Sum": 150.16015625,
"accuracy": 0.78125,
"epoch": 0.4975,
"step": 199
},
{
"Batch Sum": 149.1392822265625,
"accuracy": 0.75,
"epoch": 0.4975,
"step": 199
},
{
"epoch": 0.5,
"grad_norm": 10.291372299194336,
"learning_rate": 2.631578947368421e-06,
"loss": 0.4164,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}