{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9954430379746837,
  "eval_steps": 500,
  "global_step": 1479,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.020253164556962026,
      "grad_norm": 2.8774304487643567,
      "learning_rate": 5e-06,
      "loss": 0.7568,
      "step": 10
    },
    {
      "epoch": 0.04050632911392405,
      "grad_norm": 2.5344540389225125,
      "learning_rate": 5e-06,
      "loss": 0.6505,
      "step": 20
    },
    {
      "epoch": 0.060759493670886074,
      "grad_norm": 1.5191386004765144,
      "learning_rate": 5e-06,
      "loss": 0.6288,
      "step": 30
    },
    {
      "epoch": 0.0810126582278481,
      "grad_norm": 1.4141551743390433,
      "learning_rate": 5e-06,
      "loss": 0.6131,
      "step": 40
    },
    {
      "epoch": 0.10126582278481013,
      "grad_norm": 2.4862200047694842,
      "learning_rate": 5e-06,
      "loss": 0.6033,
      "step": 50
    },
    {
      "epoch": 0.12151898734177215,
      "grad_norm": 1.4750300815086217,
      "learning_rate": 5e-06,
      "loss": 0.5973,
      "step": 60
    },
    {
      "epoch": 0.14177215189873418,
      "grad_norm": 1.4915548804694947,
      "learning_rate": 5e-06,
      "loss": 0.5926,
      "step": 70
    },
    {
      "epoch": 0.1620253164556962,
      "grad_norm": 1.892372524650567,
      "learning_rate": 5e-06,
      "loss": 0.5879,
      "step": 80
    },
    {
      "epoch": 0.18227848101265823,
      "grad_norm": 1.709681770675116,
      "learning_rate": 5e-06,
      "loss": 0.5863,
      "step": 90
    },
    {
      "epoch": 0.20253164556962025,
      "grad_norm": 2.254618419373403,
      "learning_rate": 5e-06,
      "loss": 0.5891,
      "step": 100
    },
    {
      "epoch": 0.22278481012658227,
      "grad_norm": 1.5236540815926303,
      "learning_rate": 5e-06,
      "loss": 0.5814,
      "step": 110
    },
    {
      "epoch": 0.2430379746835443,
      "grad_norm": 1.3816913570178588,
      "learning_rate": 5e-06,
      "loss": 0.5792,
      "step": 120
    },
    {
      "epoch": 0.26329113924050634,
      "grad_norm": 1.7729448633576892,
      "learning_rate": 5e-06,
      "loss": 0.5783,
      "step": 130
    },
    {
      "epoch": 0.28354430379746837,
      "grad_norm": 1.2473090919035952,
      "learning_rate": 5e-06,
      "loss": 0.5676,
      "step": 140
    },
    {
      "epoch": 0.3037974683544304,
      "grad_norm": 1.716302858423637,
      "learning_rate": 5e-06,
      "loss": 0.5787,
      "step": 150
    },
    {
      "epoch": 0.3240506329113924,
      "grad_norm": 1.9589952523912002,
      "learning_rate": 5e-06,
      "loss": 0.5777,
      "step": 160
    },
    {
      "epoch": 0.34430379746835443,
      "grad_norm": 2.496944084572566,
      "learning_rate": 5e-06,
      "loss": 0.576,
      "step": 170
    },
    {
      "epoch": 0.36455696202531646,
      "grad_norm": 2.2336080144547434,
      "learning_rate": 5e-06,
      "loss": 0.5711,
      "step": 180
    },
    {
      "epoch": 0.3848101265822785,
      "grad_norm": 1.7307311567783212,
      "learning_rate": 5e-06,
      "loss": 0.5698,
      "step": 190
    },
    {
      "epoch": 0.4050632911392405,
      "grad_norm": 1.6121196330441476,
      "learning_rate": 5e-06,
      "loss": 0.5652,
      "step": 200
    },
    {
      "epoch": 0.4253164556962025,
      "grad_norm": 1.533560416197585,
      "learning_rate": 5e-06,
      "loss": 0.5617,
      "step": 210
    },
    {
      "epoch": 0.44556962025316454,
      "grad_norm": 1.452450126915398,
      "learning_rate": 5e-06,
      "loss": 0.5672,
      "step": 220
    },
    {
      "epoch": 0.46582278481012657,
      "grad_norm": 1.7741970920673447,
      "learning_rate": 5e-06,
      "loss": 0.5672,
      "step": 230
    },
    {
      "epoch": 0.4860759493670886,
      "grad_norm": 1.3679529307417164,
      "learning_rate": 5e-06,
      "loss": 0.5639,
      "step": 240
    },
    {
      "epoch": 0.5063291139240507,
      "grad_norm": 1.483375368159423,
      "learning_rate": 5e-06,
      "loss": 0.5669,
      "step": 250
    },
    {
      "epoch": 0.5265822784810127,
      "grad_norm": 1.5503282129766518,
      "learning_rate": 5e-06,
      "loss": 0.5537,
      "step": 260
    },
    {
      "epoch": 0.5468354430379747,
      "grad_norm": 1.5972505520379774,
      "learning_rate": 5e-06,
      "loss": 0.5631,
      "step": 270
    },
    {
      "epoch": 0.5670886075949367,
      "grad_norm": 1.5484775869530372,
      "learning_rate": 5e-06,
      "loss": 0.5585,
      "step": 280
    },
    {
      "epoch": 0.5873417721518988,
      "grad_norm": 1.3036495786562399,
      "learning_rate": 5e-06,
      "loss": 0.5582,
      "step": 290
    },
    {
      "epoch": 0.6075949367088608,
      "grad_norm": 1.3696617213668716,
      "learning_rate": 5e-06,
      "loss": 0.5587,
      "step": 300
    },
    {
      "epoch": 0.6278481012658228,
      "grad_norm": 1.2340289028597662,
      "learning_rate": 5e-06,
      "loss": 0.5641,
      "step": 310
    },
    {
      "epoch": 0.6481012658227848,
      "grad_norm": 1.4123806560174987,
      "learning_rate": 5e-06,
      "loss": 0.5598,
      "step": 320
    },
    {
      "epoch": 0.6683544303797468,
      "grad_norm": 1.5967684052171425,
      "learning_rate": 5e-06,
      "loss": 0.5597,
      "step": 330
    },
    {
      "epoch": 0.6886075949367089,
      "grad_norm": 1.2564531551518137,
      "learning_rate": 5e-06,
      "loss": 0.5593,
      "step": 340
    },
    {
      "epoch": 0.7088607594936709,
      "grad_norm": 1.4426013993235978,
      "learning_rate": 5e-06,
      "loss": 0.5606,
      "step": 350
    },
    {
      "epoch": 0.7291139240506329,
      "grad_norm": 1.2005036232032604,
      "learning_rate": 5e-06,
      "loss": 0.563,
      "step": 360
    },
    {
      "epoch": 0.7493670886075949,
      "grad_norm": 1.3290797941015413,
      "learning_rate": 5e-06,
      "loss": 0.5558,
      "step": 370
    },
    {
      "epoch": 0.769620253164557,
      "grad_norm": 1.5577348440553334,
      "learning_rate": 5e-06,
      "loss": 0.5529,
      "step": 380
    },
    {
      "epoch": 0.789873417721519,
      "grad_norm": 1.5248581244957846,
      "learning_rate": 5e-06,
      "loss": 0.5558,
      "step": 390
    },
    {
      "epoch": 0.810126582278481,
      "grad_norm": 1.1332353996382762,
      "learning_rate": 5e-06,
      "loss": 0.5556,
      "step": 400
    },
    {
      "epoch": 0.830379746835443,
      "grad_norm": 1.3714411687499122,
      "learning_rate": 5e-06,
      "loss": 0.5506,
      "step": 410
    },
    {
      "epoch": 0.850632911392405,
      "grad_norm": 1.1484372834767997,
      "learning_rate": 5e-06,
      "loss": 0.5591,
      "step": 420
    },
    {
      "epoch": 0.8708860759493671,
      "grad_norm": 1.1107967078130156,
      "learning_rate": 5e-06,
      "loss": 0.5523,
      "step": 430
    },
    {
      "epoch": 0.8911392405063291,
      "grad_norm": 1.1798432479054304,
      "learning_rate": 5e-06,
      "loss": 0.5579,
      "step": 440
    },
    {
      "epoch": 0.9113924050632911,
      "grad_norm": 1.3238490582363345,
      "learning_rate": 5e-06,
      "loss": 0.5491,
      "step": 450
    },
    {
      "epoch": 0.9316455696202531,
      "grad_norm": 1.1321205960261402,
      "learning_rate": 5e-06,
      "loss": 0.5538,
      "step": 460
    },
    {
      "epoch": 0.9518987341772152,
      "grad_norm": 1.2839586168848394,
      "learning_rate": 5e-06,
      "loss": 0.5544,
      "step": 470
    },
    {
      "epoch": 0.9721518987341772,
      "grad_norm": 1.0897398030657333,
      "learning_rate": 5e-06,
      "loss": 0.5512,
      "step": 480
    },
    {
      "epoch": 0.9924050632911392,
      "grad_norm": 1.1575734050873214,
      "learning_rate": 5e-06,
      "loss": 0.5479,
      "step": 490
    },
    {
      "epoch": 0.9984810126582279,
      "eval_loss": 0.06885610520839691,
      "eval_runtime": 505.9238,
      "eval_samples_per_second": 26.296,
      "eval_steps_per_second": 0.411,
      "step": 493
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 1.9053041856650703,
      "learning_rate": 5e-06,
      "loss": 0.5071,
      "step": 500
    },
    {
      "epoch": 1.0329113924050632,
      "grad_norm": 1.4401660522739657,
      "learning_rate": 5e-06,
      "loss": 0.4726,
      "step": 510
    },
    {
      "epoch": 1.0531645569620254,
      "grad_norm": 1.3320968796532682,
      "learning_rate": 5e-06,
      "loss": 0.4683,
      "step": 520
    },
    {
      "epoch": 1.0734177215189873,
      "grad_norm": 1.7576879811603687,
      "learning_rate": 5e-06,
      "loss": 0.4693,
      "step": 530
    },
    {
      "epoch": 1.0936708860759494,
      "grad_norm": 1.498480760178038,
      "learning_rate": 5e-06,
      "loss": 0.4649,
      "step": 540
    },
    {
      "epoch": 1.1139240506329113,
      "grad_norm": 1.6076370988752315,
      "learning_rate": 5e-06,
      "loss": 0.4668,
      "step": 550
    },
    {
      "epoch": 1.1341772151898735,
      "grad_norm": 2.4418881588463153,
      "learning_rate": 5e-06,
      "loss": 0.4638,
      "step": 560
    },
    {
      "epoch": 1.1544303797468354,
      "grad_norm": 1.5160243794923711,
      "learning_rate": 5e-06,
      "loss": 0.4699,
      "step": 570
    },
    {
      "epoch": 1.1746835443037975,
      "grad_norm": 1.6384572329110083,
      "learning_rate": 5e-06,
      "loss": 0.4684,
      "step": 580
    },
    {
      "epoch": 1.1949367088607594,
      "grad_norm": 1.5146441040161251,
      "learning_rate": 5e-06,
      "loss": 0.4612,
      "step": 590
    },
    {
      "epoch": 1.2151898734177216,
      "grad_norm": 1.8424704593389885,
      "learning_rate": 5e-06,
      "loss": 0.4689,
      "step": 600
    },
    {
      "epoch": 1.2354430379746835,
      "grad_norm": 1.2743268882656655,
      "learning_rate": 5e-06,
      "loss": 0.4648,
      "step": 610
    },
    {
      "epoch": 1.2556962025316456,
      "grad_norm": 1.3305136914110434,
      "learning_rate": 5e-06,
      "loss": 0.4688,
      "step": 620
    },
    {
      "epoch": 1.2759493670886077,
      "grad_norm": 1.2563425944618831,
      "learning_rate": 5e-06,
      "loss": 0.4703,
      "step": 630
    },
    {
      "epoch": 1.2962025316455696,
      "grad_norm": 1.2030246957550959,
      "learning_rate": 5e-06,
      "loss": 0.4706,
      "step": 640
    },
    {
      "epoch": 1.3164556962025316,
      "grad_norm": 1.1706301701079003,
      "learning_rate": 5e-06,
      "loss": 0.4757,
      "step": 650
    },
    {
      "epoch": 1.3367088607594937,
      "grad_norm": 1.342335289118665,
      "learning_rate": 5e-06,
      "loss": 0.4647,
      "step": 660
    },
    {
      "epoch": 1.3569620253164558,
      "grad_norm": 1.3845959897798454,
      "learning_rate": 5e-06,
      "loss": 0.4791,
      "step": 670
    },
    {
      "epoch": 1.3772151898734177,
      "grad_norm": 1.5821485126658388,
      "learning_rate": 5e-06,
      "loss": 0.475,
      "step": 680
    },
    {
      "epoch": 1.3974683544303796,
      "grad_norm": 1.3559346968461559,
      "learning_rate": 5e-06,
      "loss": 0.4724,
      "step": 690
    },
    {
      "epoch": 1.4177215189873418,
      "grad_norm": 1.7083072029059938,
      "learning_rate": 5e-06,
      "loss": 0.4696,
      "step": 700
    },
    {
      "epoch": 1.437974683544304,
      "grad_norm": 1.4933144469805344,
      "learning_rate": 5e-06,
      "loss": 0.4744,
      "step": 710
    },
    {
      "epoch": 1.4582278481012658,
      "grad_norm": 1.693856562335361,
      "learning_rate": 5e-06,
      "loss": 0.4745,
      "step": 720
    },
    {
      "epoch": 1.4784810126582277,
      "grad_norm": 1.1953534031290156,
      "learning_rate": 5e-06,
      "loss": 0.4769,
      "step": 730
    },
    {
      "epoch": 1.4987341772151899,
      "grad_norm": 1.2282091851821244,
      "learning_rate": 5e-06,
      "loss": 0.4705,
      "step": 740
    },
    {
      "epoch": 1.518987341772152,
      "grad_norm": 1.3246981766857313,
      "learning_rate": 5e-06,
      "loss": 0.4736,
      "step": 750
    },
    {
      "epoch": 1.539240506329114,
      "grad_norm": 1.3640960444668195,
      "learning_rate": 5e-06,
      "loss": 0.476,
      "step": 760
    },
    {
      "epoch": 1.5594936708860758,
      "grad_norm": 1.3598652554278743,
      "learning_rate": 5e-06,
      "loss": 0.4754,
      "step": 770
    },
    {
      "epoch": 1.579746835443038,
      "grad_norm": 1.394674387460495,
      "learning_rate": 5e-06,
      "loss": 0.4761,
      "step": 780
    },
    {
      "epoch": 1.6,
      "grad_norm": 1.6429795597253631,
      "learning_rate": 5e-06,
      "loss": 0.479,
      "step": 790
    },
    {
      "epoch": 1.620253164556962,
      "grad_norm": 1.8755647208576927,
      "learning_rate": 5e-06,
      "loss": 0.4738,
      "step": 800
    },
    {
      "epoch": 1.640506329113924,
      "grad_norm": 1.5379253414384246,
      "learning_rate": 5e-06,
      "loss": 0.4658,
      "step": 810
    },
    {
      "epoch": 1.660759493670886,
      "grad_norm": 1.5793638860512589,
      "learning_rate": 5e-06,
      "loss": 0.4747,
      "step": 820
    },
    {
      "epoch": 1.6810126582278482,
      "grad_norm": 1.4748529322455386,
      "learning_rate": 5e-06,
      "loss": 0.4701,
      "step": 830
    },
    {
      "epoch": 1.70126582278481,
      "grad_norm": 1.2892727579865184,
      "learning_rate": 5e-06,
      "loss": 0.4719,
      "step": 840
    },
    {
      "epoch": 1.721518987341772,
      "grad_norm": 1.3595053928671879,
      "learning_rate": 5e-06,
      "loss": 0.474,
      "step": 850
    },
    {
      "epoch": 1.7417721518987341,
      "grad_norm": 1.2849260840585164,
      "learning_rate": 5e-06,
      "loss": 0.474,
      "step": 860
    },
    {
      "epoch": 1.7620253164556963,
      "grad_norm": 1.35522219244357,
      "learning_rate": 5e-06,
      "loss": 0.4772,
      "step": 870
    },
    {
      "epoch": 1.7822784810126582,
      "grad_norm": 1.226173238642379,
      "learning_rate": 5e-06,
      "loss": 0.4778,
      "step": 880
    },
    {
      "epoch": 1.80253164556962,
      "grad_norm": 1.2694514957146457,
      "learning_rate": 5e-06,
      "loss": 0.4747,
      "step": 890
    },
    {
      "epoch": 1.8227848101265822,
      "grad_norm": 1.2788355163177385,
      "learning_rate": 5e-06,
      "loss": 0.4753,
      "step": 900
    },
    {
      "epoch": 1.8430379746835444,
      "grad_norm": 1.2200366908384497,
      "learning_rate": 5e-06,
      "loss": 0.4783,
      "step": 910
    },
    {
      "epoch": 1.8632911392405065,
      "grad_norm": 1.247670305705125,
      "learning_rate": 5e-06,
      "loss": 0.4774,
      "step": 920
    },
    {
      "epoch": 1.8835443037974684,
      "grad_norm": 1.187274714004516,
      "learning_rate": 5e-06,
      "loss": 0.4742,
      "step": 930
    },
    {
      "epoch": 1.9037974683544303,
      "grad_norm": 1.1986542333604602,
      "learning_rate": 5e-06,
      "loss": 0.4782,
      "step": 940
    },
    {
      "epoch": 1.9240506329113924,
      "grad_norm": 1.3407807221023489,
      "learning_rate": 5e-06,
      "loss": 0.4788,
      "step": 950
    },
    {
      "epoch": 1.9443037974683546,
      "grad_norm": 1.0996638488393005,
      "learning_rate": 5e-06,
      "loss": 0.4833,
      "step": 960
    },
    {
      "epoch": 1.9645569620253165,
      "grad_norm": 1.1801562120830218,
      "learning_rate": 5e-06,
      "loss": 0.4819,
      "step": 970
    },
    {
      "epoch": 1.9848101265822784,
      "grad_norm": 1.2584627671933288,
      "learning_rate": 5e-06,
      "loss": 0.4827,
      "step": 980
    },
    {
      "epoch": 1.998987341772152,
      "eval_loss": 0.06906617432832718,
      "eval_runtime": 507.4862,
      "eval_samples_per_second": 26.215,
      "eval_steps_per_second": 0.41,
      "step": 987
    },
    {
      "epoch": 2.0050632911392405,
      "grad_norm": 1.9542006060804669,
      "learning_rate": 5e-06,
      "loss": 0.4561,
      "step": 990
    },
    {
      "epoch": 2.0253164556962027,
      "grad_norm": 1.935000758745827,
      "learning_rate": 5e-06,
      "loss": 0.3924,
      "step": 1000
    },
    {
      "epoch": 2.0455696202531644,
      "grad_norm": 1.5425227007244047,
      "learning_rate": 5e-06,
      "loss": 0.3882,
      "step": 1010
    },
    {
      "epoch": 2.0658227848101265,
      "grad_norm": 1.406671374378772,
      "learning_rate": 5e-06,
      "loss": 0.3834,
      "step": 1020
    },
    {
      "epoch": 2.0860759493670886,
      "grad_norm": 1.3225741000814224,
      "learning_rate": 5e-06,
      "loss": 0.3829,
      "step": 1030
    },
    {
      "epoch": 2.1063291139240508,
      "grad_norm": 1.3087854581289653,
      "learning_rate": 5e-06,
      "loss": 0.381,
      "step": 1040
    },
    {
      "epoch": 2.1265822784810124,
      "grad_norm": 1.5055247128397313,
      "learning_rate": 5e-06,
      "loss": 0.3915,
      "step": 1050
    },
    {
      "epoch": 2.1468354430379746,
      "grad_norm": 1.3253448003192276,
      "learning_rate": 5e-06,
      "loss": 0.3867,
      "step": 1060
    },
    {
      "epoch": 2.1670886075949367,
      "grad_norm": 1.2671985080624293,
      "learning_rate": 5e-06,
      "loss": 0.3909,
      "step": 1070
    },
    {
      "epoch": 2.187341772151899,
      "grad_norm": 1.455516007687749,
      "learning_rate": 5e-06,
      "loss": 0.3887,
      "step": 1080
    },
    {
      "epoch": 2.207594936708861,
      "grad_norm": 1.3748988810626455,
      "learning_rate": 5e-06,
      "loss": 0.3865,
      "step": 1090
    },
    {
      "epoch": 2.2278481012658227,
      "grad_norm": 1.3586092724615417,
      "learning_rate": 5e-06,
      "loss": 0.3953,
      "step": 1100
    },
    {
      "epoch": 2.248101265822785,
      "grad_norm": 1.3431131885693126,
      "learning_rate": 5e-06,
      "loss": 0.3909,
      "step": 1110
    },
    {
      "epoch": 2.268354430379747,
      "grad_norm": 1.2858382727150257,
      "learning_rate": 5e-06,
      "loss": 0.3937,
      "step": 1120
    },
    {
      "epoch": 2.2886075949367086,
      "grad_norm": 1.3631461037325234,
      "learning_rate": 5e-06,
      "loss": 0.3953,
      "step": 1130
    },
    {
      "epoch": 2.3088607594936708,
      "grad_norm": 1.3945141903005032,
      "learning_rate": 5e-06,
      "loss": 0.391,
      "step": 1140
    },
    {
      "epoch": 2.329113924050633,
      "grad_norm": 1.5759793822570192,
      "learning_rate": 5e-06,
      "loss": 0.3975,
      "step": 1150
    },
    {
      "epoch": 2.349367088607595,
      "grad_norm": 2.039342813633422,
      "learning_rate": 5e-06,
      "loss": 0.3977,
      "step": 1160
    },
    {
      "epoch": 2.369620253164557,
      "grad_norm": 1.6975446566072463,
      "learning_rate": 5e-06,
      "loss": 0.3977,
      "step": 1170
    },
    {
      "epoch": 2.389873417721519,
      "grad_norm": 1.802922366861523,
      "learning_rate": 5e-06,
      "loss": 0.3969,
      "step": 1180
    },
    {
      "epoch": 2.410126582278481,
      "grad_norm": 1.854331261983343,
      "learning_rate": 5e-06,
      "loss": 0.3958,
      "step": 1190
    },
    {
      "epoch": 2.430379746835443,
      "grad_norm": 1.4940689520172188,
      "learning_rate": 5e-06,
      "loss": 0.3938,
      "step": 1200
    },
    {
      "epoch": 2.4506329113924052,
      "grad_norm": 1.7259555359811671,
      "learning_rate": 5e-06,
      "loss": 0.3929,
      "step": 1210
    },
    {
      "epoch": 2.470886075949367,
      "grad_norm": 1.5736139090062196,
      "learning_rate": 5e-06,
      "loss": 0.3969,
      "step": 1220
    },
    {
      "epoch": 2.491139240506329,
      "grad_norm": 1.4329253407509266,
      "learning_rate": 5e-06,
      "loss": 0.3967,
      "step": 1230
    },
    {
      "epoch": 2.511392405063291,
      "grad_norm": 1.4901567005006235,
      "learning_rate": 5e-06,
      "loss": 0.4009,
      "step": 1240
    },
    {
      "epoch": 2.5316455696202533,
      "grad_norm": 1.3522242258508488,
      "learning_rate": 5e-06,
      "loss": 0.3951,
      "step": 1250
    },
    {
      "epoch": 2.5518987341772155,
      "grad_norm": 1.536738400126712,
      "learning_rate": 5e-06,
      "loss": 0.3913,
      "step": 1260
    },
    {
      "epoch": 2.572151898734177,
      "grad_norm": 1.2953991724559513,
      "learning_rate": 5e-06,
      "loss": 0.3975,
      "step": 1270
    },
    {
      "epoch": 2.5924050632911393,
      "grad_norm": 1.2523173423982397,
      "learning_rate": 5e-06,
      "loss": 0.3996,
      "step": 1280
    },
    {
      "epoch": 2.6126582278481014,
      "grad_norm": 1.3336723981706788,
      "learning_rate": 5e-06,
      "loss": 0.4052,
      "step": 1290
    },
    {
      "epoch": 2.632911392405063,
      "grad_norm": 1.472845603089787,
      "learning_rate": 5e-06,
      "loss": 0.4003,
      "step": 1300
    },
    {
      "epoch": 2.6531645569620252,
      "grad_norm": 1.288987152067692,
      "learning_rate": 5e-06,
      "loss": 0.3998,
      "step": 1310
    },
    {
      "epoch": 2.6734177215189874,
      "grad_norm": 1.403806833032338,
      "learning_rate": 5e-06,
      "loss": 0.4027,
      "step": 1320
    },
    {
      "epoch": 2.6936708860759495,
      "grad_norm": 1.272279373249665,
      "learning_rate": 5e-06,
      "loss": 0.405,
      "step": 1330
    },
    {
      "epoch": 2.7139240506329116,
      "grad_norm": 1.3816960642618357,
      "learning_rate": 5e-06,
      "loss": 0.403,
      "step": 1340
    },
    {
      "epoch": 2.7341772151898733,
      "grad_norm": 1.3287384175883568,
      "learning_rate": 5e-06,
      "loss": 0.403,
      "step": 1350
    },
    {
      "epoch": 2.7544303797468355,
      "grad_norm": 1.3061458943923105,
      "learning_rate": 5e-06,
      "loss": 0.3987,
      "step": 1360
    },
    {
      "epoch": 2.7746835443037976,
      "grad_norm": 1.5895834597948455,
      "learning_rate": 5e-06,
      "loss": 0.4048,
      "step": 1370
    },
    {
      "epoch": 2.7949367088607593,
      "grad_norm": 1.3507916034552265,
      "learning_rate": 5e-06,
      "loss": 0.402,
      "step": 1380
    },
    {
      "epoch": 2.8151898734177214,
      "grad_norm": 1.4325413783265324,
      "learning_rate": 5e-06,
      "loss": 0.4048,
      "step": 1390
    },
    {
      "epoch": 2.8354430379746836,
      "grad_norm": 1.2903382408076844,
      "learning_rate": 5e-06,
      "loss": 0.3986,
      "step": 1400
    },
    {
      "epoch": 2.8556962025316457,
      "grad_norm": 1.337292137509307,
      "learning_rate": 5e-06,
      "loss": 0.4068,
      "step": 1410
    },
    {
      "epoch": 2.875949367088608,
      "grad_norm": 1.2790521065554994,
      "learning_rate": 5e-06,
      "loss": 0.4032,
      "step": 1420
    },
    {
      "epoch": 2.8962025316455695,
      "grad_norm": 1.2629153464134446,
      "learning_rate": 5e-06,
      "loss": 0.4041,
      "step": 1430
    },
    {
      "epoch": 2.9164556962025316,
      "grad_norm": 1.3603753931715636,
      "learning_rate": 5e-06,
      "loss": 0.4065,
      "step": 1440
    },
    {
      "epoch": 2.9367088607594938,
      "grad_norm": 1.3567858575980736,
      "learning_rate": 5e-06,
      "loss": 0.4155,
      "step": 1450
    },
    {
      "epoch": 2.9569620253164555,
      "grad_norm": 1.2575983614178015,
      "learning_rate": 5e-06,
      "loss": 0.4037,
      "step": 1460
    },
    {
      "epoch": 2.9772151898734176,
      "grad_norm": 1.3268109235441432,
      "learning_rate": 5e-06,
      "loss": 0.4073,
      "step": 1470
    },
    {
      "epoch": 2.9954430379746837,
      "eval_loss": 0.07261991500854492,
      "eval_runtime": 504.5612,
      "eval_samples_per_second": 26.367,
      "eval_steps_per_second": 0.412,
      "step": 1479
    },
    {
      "epoch": 2.9954430379746837,
      "step": 1479,
      "total_flos": 2477170706350080.0,
      "train_loss": 0.4815750637918489,
      "train_runtime": 84061.7352,
      "train_samples_per_second": 9.02,
      "train_steps_per_second": 0.018
    }
  ],
  "logging_steps": 10,
  "max_steps": 1479,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2477170706350080.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}