{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "global_step": 5178,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 5e-06,
      "loss": 3.0388,
      "step": 10
    },
    {
      "epoch": 0.01,
      "learning_rate": 1e-05,
      "loss": 2.9144,
      "step": 20
    },
    {
      "epoch": 0.01,
      "learning_rate": 1.5e-05,
      "loss": 2.4056,
      "step": 30
    },
    {
      "epoch": 0.02,
      "learning_rate": 2e-05,
      "loss": 1.3107,
      "step": 40
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.5e-05,
      "loss": 0.8564,
      "step": 50
    },
    {
      "epoch": 0.02,
      "learning_rate": 3e-05,
      "loss": 0.6775,
      "step": 60
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.5e-05,
      "loss": 0.54,
      "step": 70
    },
    {
      "epoch": 0.03,
      "learning_rate": 4e-05,
      "loss": 0.5361,
      "step": 80
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.5e-05,
      "loss": 0.4847,
      "step": 90
    },
    {
      "epoch": 0.04,
      "learning_rate": 5e-05,
      "loss": 0.4707,
      "step": 100
    },
    {
      "epoch": 0.04,
      "eval_loss": 0.4903041422367096,
      "eval_runtime": 241.1648,
      "eval_samples_per_second": 2.073,
      "eval_steps_per_second": 0.066,
      "step": 100
    },
{ | |
"epoch": 0.04, | |
"learning_rate": 5.500000000000001e-05, | |
"loss": 0.4843, | |
"step": 110 | |
}, | |
{ | |
"epoch": 0.05, | |
"learning_rate": 6e-05, | |
"loss": 0.4912, | |
"step": 120 | |
}, | |
{ | |
"epoch": 0.05, | |
"learning_rate": 6.500000000000001e-05, | |
"loss": 0.453, | |
"step": 130 | |
}, | |
{ | |
"epoch": 0.05, | |
"learning_rate": 7e-05, | |
"loss": 0.4396, | |
"step": 140 | |
}, | |
{ | |
"epoch": 0.06, | |
"learning_rate": 7.500000000000001e-05, | |
"loss": 0.4066, | |
"step": 150 | |
}, | |
{ | |
"epoch": 0.06, | |
"learning_rate": 8e-05, | |
"loss": 0.4339, | |
"step": 160 | |
}, | |
{ | |
"epoch": 0.07, | |
"learning_rate": 8.5e-05, | |
"loss": 0.4042, | |
"step": 170 | |
}, | |
{ | |
"epoch": 0.07, | |
"learning_rate": 9e-05, | |
"loss": 0.4036, | |
"step": 180 | |
}, | |
{ | |
"epoch": 0.07, | |
"learning_rate": 9.5e-05, | |
"loss": 0.4017, | |
"step": 190 | |
}, | |
{ | |
"epoch": 0.08, | |
"learning_rate": 0.0001, | |
"loss": 0.4132, | |
"step": 200 | |
}, | |
{ | |
"epoch": 0.08, | |
"eval_loss": 0.4102957546710968, | |
"eval_runtime": 237.7984, | |
"eval_samples_per_second": 2.103, | |
"eval_steps_per_second": 0.067, | |
"step": 200 | |
}, | |
{ | |
"epoch": 0.08, | |
"learning_rate": 9.979911611088791e-05, | |
"loss": 0.4073, | |
"step": 210 | |
}, | |
{ | |
"epoch": 0.08, | |
"learning_rate": 9.959823222177582e-05, | |
"loss": 0.3744, | |
"step": 220 | |
}, | |
{ | |
"epoch": 0.09, | |
"learning_rate": 9.939734833266372e-05, | |
"loss": 0.4091, | |
"step": 230 | |
}, | |
{ | |
"epoch": 0.09, | |
"learning_rate": 9.919646444355163e-05, | |
"loss": 0.3846, | |
"step": 240 | |
}, | |
{ | |
"epoch": 0.1, | |
"learning_rate": 9.899558055443953e-05, | |
"loss": 0.4056, | |
"step": 250 | |
}, | |
{ | |
"epoch": 0.1, | |
"learning_rate": 9.879469666532744e-05, | |
"loss": 0.3713, | |
"step": 260 | |
}, | |
{ | |
"epoch": 0.1, | |
"learning_rate": 9.859381277621536e-05, | |
"loss": 0.3851, | |
"step": 270 | |
}, | |
{ | |
"epoch": 0.11, | |
"learning_rate": 9.839292888710326e-05, | |
"loss": 0.3802, | |
"step": 280 | |
}, | |
{ | |
"epoch": 0.11, | |
"learning_rate": 9.819204499799116e-05, | |
"loss": 0.3883, | |
"step": 290 | |
}, | |
{ | |
"epoch": 0.12, | |
"learning_rate": 9.799116110887908e-05, | |
"loss": 0.3939, | |
"step": 300 | |
}, | |
{ | |
"epoch": 0.12, | |
"eval_loss": 0.3881138265132904, | |
"eval_runtime": 233.0966, | |
"eval_samples_per_second": 2.145, | |
"eval_steps_per_second": 0.069, | |
"step": 300 | |
}, | |
{ | |
"epoch": 0.12, | |
"learning_rate": 9.779027721976698e-05, | |
"loss": 0.3892, | |
"step": 310 | |
}, | |
{ | |
"epoch": 0.12, | |
"learning_rate": 9.758939333065489e-05, | |
"loss": 0.3568, | |
"step": 320 | |
}, | |
{ | |
"epoch": 0.13, | |
"learning_rate": 9.73885094415428e-05, | |
"loss": 0.3924, | |
"step": 330 | |
}, | |
{ | |
"epoch": 0.13, | |
"learning_rate": 9.71876255524307e-05, | |
"loss": 0.3665, | |
"step": 340 | |
}, | |
{ | |
"epoch": 0.14, | |
"learning_rate": 9.69867416633186e-05, | |
"loss": 0.3685, | |
"step": 350 | |
}, | |
{ | |
"epoch": 0.14, | |
"learning_rate": 9.678585777420651e-05, | |
"loss": 0.3713, | |
"step": 360 | |
}, | |
{ | |
"epoch": 0.14, | |
"learning_rate": 9.658497388509442e-05, | |
"loss": 0.356, | |
"step": 370 | |
}, | |
{ | |
"epoch": 0.15, | |
"learning_rate": 9.638408999598232e-05, | |
"loss": 0.3484, | |
"step": 380 | |
}, | |
{ | |
"epoch": 0.15, | |
"learning_rate": 9.618320610687024e-05, | |
"loss": 0.3865, | |
"step": 390 | |
}, | |
{ | |
"epoch": 0.15, | |
"learning_rate": 9.598232221775813e-05, | |
"loss": 0.3645, | |
"step": 400 | |
}, | |
{ | |
"epoch": 0.15, | |
"eval_loss": 0.3724190890789032, | |
"eval_runtime": 233.3567, | |
"eval_samples_per_second": 2.143, | |
"eval_steps_per_second": 0.069, | |
"step": 400 | |
}, | |
{ | |
"epoch": 0.16, | |
"learning_rate": 9.578143832864604e-05, | |
"loss": 0.3665, | |
"step": 410 | |
}, | |
{ | |
"epoch": 0.16, | |
"learning_rate": 9.558055443953396e-05, | |
"loss": 0.3496, | |
"step": 420 | |
}, | |
{ | |
"epoch": 0.17, | |
"learning_rate": 9.537967055042187e-05, | |
"loss": 0.332, | |
"step": 430 | |
}, | |
{ | |
"epoch": 0.17, | |
"learning_rate": 9.517878666130977e-05, | |
"loss": 0.3624, | |
"step": 440 | |
}, | |
{ | |
"epoch": 0.17, | |
"learning_rate": 9.497790277219766e-05, | |
"loss": 0.3503, | |
"step": 450 | |
}, | |
{ | |
"epoch": 0.18, | |
"learning_rate": 9.477701888308558e-05, | |
"loss": 0.337, | |
"step": 460 | |
}, | |
{ | |
"epoch": 0.18, | |
"learning_rate": 9.457613499397349e-05, | |
"loss": 0.376, | |
"step": 470 | |
}, | |
{ | |
"epoch": 0.19, | |
"learning_rate": 9.43752511048614e-05, | |
"loss": 0.3304, | |
"step": 480 | |
}, | |
{ | |
"epoch": 0.19, | |
"learning_rate": 9.41743672157493e-05, | |
"loss": 0.3644, | |
"step": 490 | |
}, | |
{ | |
"epoch": 0.19, | |
"learning_rate": 9.39734833266372e-05, | |
"loss": 0.3618, | |
"step": 500 | |
}, | |
{ | |
"epoch": 0.19, | |
"eval_loss": 0.3597237765789032, | |
"eval_runtime": 233.4866, | |
"eval_samples_per_second": 2.141, | |
"eval_steps_per_second": 0.069, | |
"step": 500 | |
}, | |
{ | |
"epoch": 0.2, | |
"learning_rate": 9.377259943752511e-05, | |
"loss": 0.3855, | |
"step": 510 | |
}, | |
{ | |
"epoch": 0.2, | |
"learning_rate": 9.357171554841302e-05, | |
"loss": 0.3652, | |
"step": 520 | |
}, | |
{ | |
"epoch": 0.2, | |
"learning_rate": 9.337083165930092e-05, | |
"loss": 0.3621, | |
"step": 530 | |
}, | |
{ | |
"epoch": 0.21, | |
"learning_rate": 9.316994777018884e-05, | |
"loss": 0.3318, | |
"step": 540 | |
}, | |
{ | |
"epoch": 0.21, | |
"learning_rate": 9.296906388107675e-05, | |
"loss": 0.3409, | |
"step": 550 | |
}, | |
{ | |
"epoch": 0.22, | |
"learning_rate": 9.276817999196466e-05, | |
"loss": 0.3336, | |
"step": 560 | |
}, | |
{ | |
"epoch": 0.22, | |
"learning_rate": 9.256729610285255e-05, | |
"loss": 0.3505, | |
"step": 570 | |
}, | |
{ | |
"epoch": 0.22, | |
"learning_rate": 9.236641221374047e-05, | |
"loss": 0.3354, | |
"step": 580 | |
}, | |
{ | |
"epoch": 0.23, | |
"learning_rate": 9.216552832462837e-05, | |
"loss": 0.3619, | |
"step": 590 | |
}, | |
{ | |
"epoch": 0.23, | |
"learning_rate": 9.196464443551628e-05, | |
"loss": 0.3491, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.23, | |
"eval_loss": 0.3491559624671936, | |
"eval_runtime": 227.9072, | |
"eval_samples_per_second": 2.194, | |
"eval_steps_per_second": 0.07, | |
"step": 600 | |
}, | |
{ | |
"epoch": 0.24, | |
"learning_rate": 9.176376054640418e-05, | |
"loss": 0.353, | |
"step": 610 | |
}, | |
{ | |
"epoch": 0.24, | |
"learning_rate": 9.156287665729209e-05, | |
"loss": 0.3508, | |
"step": 620 | |
}, | |
{ | |
"epoch": 0.24, | |
"learning_rate": 9.136199276818e-05, | |
"loss": 0.3637, | |
"step": 630 | |
}, | |
{ | |
"epoch": 0.25, | |
"learning_rate": 9.11611088790679e-05, | |
"loss": 0.3297, | |
"step": 640 | |
}, | |
{ | |
"epoch": 0.25, | |
"learning_rate": 9.096022498995581e-05, | |
"loss": 0.3578, | |
"step": 650 | |
}, | |
{ | |
"epoch": 0.25, | |
"learning_rate": 9.075934110084371e-05, | |
"loss": 0.3327, | |
"step": 660 | |
}, | |
{ | |
"epoch": 0.26, | |
"learning_rate": 9.055845721173163e-05, | |
"loss": 0.3336, | |
"step": 670 | |
}, | |
{ | |
"epoch": 0.26, | |
"learning_rate": 9.035757332261952e-05, | |
"loss": 0.3113, | |
"step": 680 | |
}, | |
{ | |
"epoch": 0.27, | |
"learning_rate": 9.015668943350743e-05, | |
"loss": 0.3278, | |
"step": 690 | |
}, | |
{ | |
"epoch": 0.27, | |
"learning_rate": 8.995580554439535e-05, | |
"loss": 0.3358, | |
"step": 700 | |
}, | |
{ | |
"epoch": 0.27, | |
"eval_loss": 0.3494350016117096, | |
"eval_runtime": 226.5818, | |
"eval_samples_per_second": 2.207, | |
"eval_steps_per_second": 0.071, | |
"step": 700 | |
}, | |
{ | |
"epoch": 0.27, | |
"learning_rate": 8.975492165528326e-05, | |
"loss": 0.3417, | |
"step": 710 | |
}, | |
{ | |
"epoch": 0.28, | |
"learning_rate": 8.955403776617116e-05, | |
"loss": 0.3451, | |
"step": 720 | |
}, | |
{ | |
"epoch": 0.28, | |
"learning_rate": 8.935315387705905e-05, | |
"loss": 0.3298, | |
"step": 730 | |
}, | |
{ | |
"epoch": 0.29, | |
"learning_rate": 8.915226998794697e-05, | |
"loss": 0.3188, | |
"step": 740 | |
}, | |
{ | |
"epoch": 0.29, | |
"learning_rate": 8.895138609883488e-05, | |
"loss": 0.3211, | |
"step": 750 | |
}, | |
{ | |
"epoch": 0.29, | |
"learning_rate": 8.875050220972279e-05, | |
"loss": 0.3324, | |
"step": 760 | |
}, | |
{ | |
"epoch": 0.3, | |
"learning_rate": 8.854961832061069e-05, | |
"loss": 0.3269, | |
"step": 770 | |
}, | |
{ | |
"epoch": 0.3, | |
"learning_rate": 8.83487344314986e-05, | |
"loss": 0.3308, | |
"step": 780 | |
}, | |
{ | |
"epoch": 0.31, | |
"learning_rate": 8.81478505423865e-05, | |
"loss": 0.341, | |
"step": 790 | |
}, | |
{ | |
"epoch": 0.31, | |
"learning_rate": 8.794696665327441e-05, | |
"loss": 0.3299, | |
"step": 800 | |
}, | |
{ | |
"epoch": 0.31, | |
"eval_loss": 0.3420061469078064, | |
"eval_runtime": 222.718, | |
"eval_samples_per_second": 2.245, | |
"eval_steps_per_second": 0.072, | |
"step": 800 | |
}, | |
{ | |
"epoch": 0.31, | |
"learning_rate": 8.774608276416231e-05, | |
"loss": 0.3289, | |
"step": 810 | |
}, | |
{ | |
"epoch": 0.32, | |
"learning_rate": 8.754519887505023e-05, | |
"loss": 0.328, | |
"step": 820 | |
}, | |
{ | |
"epoch": 0.32, | |
"learning_rate": 8.734431498593814e-05, | |
"loss": 0.3464, | |
"step": 830 | |
}, | |
{ | |
"epoch": 0.32, | |
"learning_rate": 8.714343109682603e-05, | |
"loss": 0.3227, | |
"step": 840 | |
}, | |
{ | |
"epoch": 0.33, | |
"learning_rate": 8.694254720771394e-05, | |
"loss": 0.352, | |
"step": 850 | |
}, | |
{ | |
"epoch": 0.33, | |
"learning_rate": 8.674166331860186e-05, | |
"loss": 0.3555, | |
"step": 860 | |
}, | |
{ | |
"epoch": 0.34, | |
"learning_rate": 8.654077942948976e-05, | |
"loss": 0.3322, | |
"step": 870 | |
}, | |
{ | |
"epoch": 0.34, | |
"learning_rate": 8.633989554037767e-05, | |
"loss": 0.316, | |
"step": 880 | |
}, | |
{ | |
"epoch": 0.34, | |
"learning_rate": 8.613901165126557e-05, | |
"loss": 0.304, | |
"step": 890 | |
}, | |
{ | |
"epoch": 0.35, | |
"learning_rate": 8.593812776215348e-05, | |
"loss": 0.3257, | |
"step": 900 | |
}, | |
{ | |
"epoch": 0.35, | |
"eval_loss": 0.3412737250328064, | |
"eval_runtime": 230.8687, | |
"eval_samples_per_second": 2.166, | |
"eval_steps_per_second": 0.069, | |
"step": 900 | |
}, | |
{ | |
"epoch": 0.35, | |
"learning_rate": 8.573724387304139e-05, | |
"loss": 0.3339, | |
"step": 910 | |
}, | |
{ | |
"epoch": 0.36, | |
"learning_rate": 8.553635998392929e-05, | |
"loss": 0.3163, | |
"step": 920 | |
}, | |
{ | |
"epoch": 0.36, | |
"learning_rate": 8.53354760948172e-05, | |
"loss": 0.3223, | |
"step": 930 | |
}, | |
{ | |
"epoch": 0.36, | |
"learning_rate": 8.513459220570512e-05, | |
"loss": 0.3423, | |
"step": 940 | |
}, | |
{ | |
"epoch": 0.37, | |
"learning_rate": 8.493370831659301e-05, | |
"loss": 0.3175, | |
"step": 950 | |
}, | |
{ | |
"epoch": 0.37, | |
"learning_rate": 8.473282442748092e-05, | |
"loss": 0.3249, | |
"step": 960 | |
}, | |
{ | |
"epoch": 0.37, | |
"learning_rate": 8.453194053836882e-05, | |
"loss": 0.3095, | |
"step": 970 | |
}, | |
{ | |
"epoch": 0.38, | |
"learning_rate": 8.433105664925674e-05, | |
"loss": 0.3324, | |
"step": 980 | |
}, | |
{ | |
"epoch": 0.38, | |
"learning_rate": 8.413017276014465e-05, | |
"loss": 0.3276, | |
"step": 990 | |
}, | |
{ | |
"epoch": 0.39, | |
"learning_rate": 8.392928887103255e-05, | |
"loss": 0.345, | |
"step": 1000 | |
}, | |
{ | |
"epoch": 0.39, | |
"eval_loss": 0.3368094265460968, | |
"eval_runtime": 233.1619, | |
"eval_samples_per_second": 2.144, | |
"eval_steps_per_second": 0.069, | |
"step": 1000 | |
}, | |
{ | |
"epoch": 0.39, | |
"learning_rate": 8.372840498192044e-05, | |
"loss": 0.3389, | |
"step": 1010 | |
}, | |
{ | |
"epoch": 0.39, | |
"learning_rate": 8.352752109280836e-05, | |
"loss": 0.321, | |
"step": 1020 | |
}, | |
{ | |
"epoch": 0.4, | |
"learning_rate": 8.332663720369627e-05, | |
"loss": 0.3497, | |
"step": 1030 | |
}, | |
{ | |
"epoch": 0.4, | |
"learning_rate": 8.312575331458418e-05, | |
"loss": 0.3283, | |
"step": 1040 | |
}, | |
{ | |
"epoch": 0.41, | |
"learning_rate": 8.292486942547208e-05, | |
"loss": 0.31, | |
"step": 1050 | |
}, | |
{ | |
"epoch": 0.41, | |
"learning_rate": 8.272398553635999e-05, | |
"loss": 0.3325, | |
"step": 1060 | |
}, | |
{ | |
"epoch": 0.41, | |
"learning_rate": 8.252310164724789e-05, | |
"loss": 0.3103, | |
"step": 1070 | |
}, | |
{ | |
"epoch": 0.42, | |
"learning_rate": 8.23222177581358e-05, | |
"loss": 0.3301, | |
"step": 1080 | |
}, | |
{ | |
"epoch": 0.42, | |
"learning_rate": 8.21213338690237e-05, | |
"loss": 0.3295, | |
"step": 1090 | |
}, | |
{ | |
"epoch": 0.42, | |
"learning_rate": 8.192044997991162e-05, | |
"loss": 0.3301, | |
"step": 1100 | |
}, | |
{ | |
"epoch": 0.42, | |
"eval_loss": 0.3294503390789032, | |
"eval_runtime": 233.058, | |
"eval_samples_per_second": 2.145, | |
"eval_steps_per_second": 0.069, | |
"step": 1100 | |
}, | |
{ | |
"epoch": 0.43, | |
"learning_rate": 8.171956609079953e-05, | |
"loss": 0.3269, | |
"step": 1110 | |
}, | |
{ | |
"epoch": 0.43, | |
"learning_rate": 8.151868220168742e-05, | |
"loss": 0.347, | |
"step": 1120 | |
}, | |
{ | |
"epoch": 0.44, | |
"learning_rate": 8.131779831257533e-05, | |
"loss": 0.3189, | |
"step": 1130 | |
}, | |
{ | |
"epoch": 0.44, | |
"learning_rate": 8.111691442346325e-05, | |
"loss": 0.3251, | |
"step": 1140 | |
}, | |
{ | |
"epoch": 0.44, | |
"learning_rate": 8.091603053435115e-05, | |
"loss": 0.305, | |
"step": 1150 | |
}, | |
{ | |
"epoch": 0.45, | |
"learning_rate": 8.071514664523906e-05, | |
"loss": 0.3054, | |
"step": 1160 | |
}, | |
{ | |
"epoch": 0.45, | |
"learning_rate": 8.051426275612696e-05, | |
"loss": 0.3426, | |
"step": 1170 | |
}, | |
{ | |
"epoch": 0.46, | |
"learning_rate": 8.031337886701487e-05, | |
"loss": 0.3206, | |
"step": 1180 | |
}, | |
{ | |
"epoch": 0.46, | |
"learning_rate": 8.011249497790278e-05, | |
"loss": 0.334, | |
"step": 1190 | |
}, | |
{ | |
"epoch": 0.46, | |
"learning_rate": 7.991161108879068e-05, | |
"loss": 0.3311, | |
"step": 1200 | |
}, | |
{ | |
"epoch": 0.46, | |
"eval_loss": 0.3330426812171936, | |
"eval_runtime": 231.4394, | |
"eval_samples_per_second": 2.16, | |
"eval_steps_per_second": 0.069, | |
"step": 1200 | |
}, | |
{ | |
"epoch": 0.47, | |
"learning_rate": 7.971072719967859e-05, | |
"loss": 0.3326, | |
"step": 1210 | |
}, | |
{ | |
"epoch": 0.47, | |
"learning_rate": 7.950984331056651e-05, | |
"loss": 0.3229, | |
"step": 1220 | |
}, | |
{ | |
"epoch": 0.48, | |
"learning_rate": 7.93089594214544e-05, | |
"loss": 0.3252, | |
"step": 1230 | |
}, | |
{ | |
"epoch": 0.48, | |
"learning_rate": 7.91080755323423e-05, | |
"loss": 0.314, | |
"step": 1240 | |
}, | |
{ | |
"epoch": 0.48, | |
"learning_rate": 7.890719164323021e-05, | |
"loss": 0.3191, | |
"step": 1250 | |
}, | |
{ | |
"epoch": 0.49, | |
"learning_rate": 7.870630775411813e-05, | |
"loss": 0.3193, | |
"step": 1260 | |
}, | |
{ | |
"epoch": 0.49, | |
"learning_rate": 7.850542386500604e-05, | |
"loss": 0.3183, | |
"step": 1270 | |
}, | |
{ | |
"epoch": 0.49, | |
"learning_rate": 7.830453997589394e-05, | |
"loss": 0.3141, | |
"step": 1280 | |
}, | |
{ | |
"epoch": 0.5, | |
"learning_rate": 7.810365608678183e-05, | |
"loss": 0.3069, | |
"step": 1290 | |
}, | |
{ | |
"epoch": 0.5, | |
"learning_rate": 7.790277219766975e-05, | |
"loss": 0.3203, | |
"step": 1300 | |
}, | |
{ | |
"epoch": 0.5, | |
"eval_loss": 0.32666015625, | |
"eval_runtime": 228.195, | |
"eval_samples_per_second": 2.191, | |
"eval_steps_per_second": 0.07, | |
"step": 1300 | |
}, | |
{ | |
"epoch": 0.51, | |
"learning_rate": 7.770188830855766e-05, | |
"loss": 0.3044, | |
"step": 1310 | |
}, | |
{ | |
"epoch": 0.51, | |
"learning_rate": 7.750100441944557e-05, | |
"loss": 0.3052, | |
"step": 1320 | |
}, | |
{ | |
"epoch": 0.51, | |
"learning_rate": 7.730012053033347e-05, | |
"loss": 0.3156, | |
"step": 1330 | |
}, | |
{ | |
"epoch": 0.52, | |
"learning_rate": 7.709923664122138e-05, | |
"loss": 0.3271, | |
"step": 1340 | |
}, | |
{ | |
"epoch": 0.52, | |
"learning_rate": 7.689835275210928e-05, | |
"loss": 0.2991, | |
"step": 1350 | |
}, | |
{ | |
"epoch": 0.53, | |
"learning_rate": 7.669746886299719e-05, | |
"loss": 0.306, | |
"step": 1360 | |
}, | |
{ | |
"epoch": 0.53, | |
"learning_rate": 7.64965849738851e-05, | |
"loss": 0.3122, | |
"step": 1370 | |
}, | |
{ | |
"epoch": 0.53, | |
"learning_rate": 7.629570108477301e-05, | |
"loss": 0.3101, | |
"step": 1380 | |
}, | |
{ | |
"epoch": 0.54, | |
"learning_rate": 7.609481719566092e-05, | |
"loss": 0.3106, | |
"step": 1390 | |
}, | |
{ | |
"epoch": 0.54, | |
"learning_rate": 7.589393330654881e-05, | |
"loss": 0.3177, | |
"step": 1400 | |
}, | |
{ | |
"epoch": 0.54, | |
"eval_loss": 0.3282645046710968, | |
"eval_runtime": 228.5175, | |
"eval_samples_per_second": 2.188, | |
"eval_steps_per_second": 0.07, | |
"step": 1400 | |
}, | |
{ | |
"epoch": 0.54, | |
"learning_rate": 7.569304941743672e-05, | |
"loss": 0.2955, | |
"step": 1410 | |
}, | |
{ | |
"epoch": 0.55, | |
"learning_rate": 7.549216552832464e-05, | |
"loss": 0.3287, | |
"step": 1420 | |
}, | |
{ | |
"epoch": 0.55, | |
"learning_rate": 7.529128163921254e-05, | |
"loss": 0.3224, | |
"step": 1430 | |
}, | |
{ | |
"epoch": 0.56, | |
"learning_rate": 7.509039775010045e-05, | |
"loss": 0.3231, | |
"step": 1440 | |
}, | |
{ | |
"epoch": 0.56, | |
"learning_rate": 7.488951386098834e-05, | |
"loss": 0.3218, | |
"step": 1450 | |
}, | |
{ | |
"epoch": 0.56, | |
"learning_rate": 7.468862997187626e-05, | |
"loss": 0.3167, | |
"step": 1460 | |
}, | |
{ | |
"epoch": 0.57, | |
"learning_rate": 7.448774608276417e-05, | |
"loss": 0.3252, | |
"step": 1470 | |
}, | |
{ | |
"epoch": 0.57, | |
"learning_rate": 7.428686219365207e-05, | |
"loss": 0.315, | |
"step": 1480 | |
}, | |
{ | |
"epoch": 0.58, | |
"learning_rate": 7.408597830453998e-05, | |
"loss": 0.3012, | |
"step": 1490 | |
}, | |
{ | |
"epoch": 0.58, | |
"learning_rate": 7.38850944154279e-05, | |
"loss": 0.3055, | |
"step": 1500 | |
}, | |
{ | |
"epoch": 0.58, | |
"eval_loss": 0.3236956000328064, | |
"eval_runtime": 235.4381, | |
"eval_samples_per_second": 2.124, | |
"eval_steps_per_second": 0.068, | |
"step": 1500 | |
}, | |
{ | |
"epoch": 0.58, | |
"learning_rate": 7.368421052631579e-05, | |
"loss": 0.3194, | |
"step": 1510 | |
}, | |
{ | |
"epoch": 0.59, | |
"learning_rate": 7.34833266372037e-05, | |
"loss": 0.3254, | |
"step": 1520 | |
}, | |
{ | |
"epoch": 0.59, | |
"learning_rate": 7.32824427480916e-05, | |
"loss": 0.3108, | |
"step": 1530 | |
}, | |
{ | |
"epoch": 0.59, | |
"learning_rate": 7.308155885897952e-05, | |
"loss": 0.3029, | |
"step": 1540 | |
}, | |
{ | |
"epoch": 0.6, | |
"learning_rate": 7.288067496986743e-05, | |
"loss": 0.319, | |
"step": 1550 | |
}, | |
{ | |
"epoch": 0.6, | |
"learning_rate": 7.267979108075532e-05, | |
"loss": 0.3114, | |
"step": 1560 | |
}, | |
{ | |
"epoch": 0.61, | |
"learning_rate": 7.247890719164322e-05, | |
"loss": 0.2944, | |
"step": 1570 | |
}, | |
{ | |
"epoch": 0.61, | |
"learning_rate": 7.227802330253114e-05, | |
"loss": 0.3255, | |
"step": 1580 | |
}, | |
{ | |
"epoch": 0.61, | |
"learning_rate": 7.207713941341905e-05, | |
"loss": 0.321, | |
"step": 1590 | |
}, | |
{ | |
"epoch": 0.62, | |
"learning_rate": 7.187625552430696e-05, | |
"loss": 0.3235, | |
"step": 1600 | |
}, | |
{ | |
"epoch": 0.62, | |
"eval_loss": 0.3245675265789032, | |
"eval_runtime": 229.0197, | |
"eval_samples_per_second": 2.183, | |
"eval_steps_per_second": 0.07, | |
"step": 1600 | |
}, | |
{ | |
"epoch": 0.62, | |
"learning_rate": 7.167537163519486e-05, | |
"loss": 0.2904, | |
"step": 1610 | |
}, | |
{ | |
"epoch": 0.63, | |
"learning_rate": 7.147448774608277e-05, | |
"loss": 0.3511, | |
"step": 1620 | |
}, | |
{ | |
"epoch": 0.63, | |
"learning_rate": 7.127360385697067e-05, | |
"loss": 0.3088, | |
"step": 1630 | |
}, | |
{ | |
"epoch": 0.63, | |
"learning_rate": 7.107271996785858e-05, | |
"loss": 0.3015, | |
"step": 1640 | |
}, | |
{ | |
"epoch": 0.64, | |
"learning_rate": 7.087183607874648e-05, | |
"loss": 0.3, | |
"step": 1650 | |
}, | |
{ | |
"epoch": 0.64, | |
"learning_rate": 7.06709521896344e-05, | |
"loss": 0.3487, | |
"step": 1660 | |
}, | |
{ | |
"epoch": 0.65, | |
"learning_rate": 7.04700683005223e-05, | |
"loss": 0.3029, | |
"step": 1670 | |
}, | |
{ | |
"epoch": 0.65, | |
"learning_rate": 7.02691844114102e-05, | |
"loss": 0.307, | |
"step": 1680 | |
}, | |
{ | |
"epoch": 0.65, | |
"learning_rate": 7.006830052229811e-05, | |
"loss": 0.3026, | |
"step": 1690 | |
}, | |
{ | |
"epoch": 0.66, | |
"learning_rate": 6.986741663318603e-05, | |
"loss": 0.3203, | |
"step": 1700 | |
}, | |
{ | |
"epoch": 0.66, | |
"eval_loss": 0.3210797905921936, | |
"eval_runtime": 235.7603, | |
"eval_samples_per_second": 2.121, | |
"eval_steps_per_second": 0.068, | |
"step": 1700 | |
}, | |
{ | |
"epoch": 0.66, | |
"learning_rate": 6.966653274407393e-05, | |
"loss": 0.3036, | |
"step": 1710 | |
}, | |
{ | |
"epoch": 0.66, | |
"learning_rate": 6.946564885496184e-05, | |
"loss": 0.32, | |
"step": 1720 | |
}, | |
{ | |
"epoch": 0.67, | |
"learning_rate": 6.926476496584973e-05, | |
"loss": 0.3065, | |
"step": 1730 | |
}, | |
{ | |
"epoch": 0.67, | |
"learning_rate": 6.906388107673765e-05, | |
"loss": 0.2972, | |
"step": 1740 | |
}, | |
{ | |
"epoch": 0.68, | |
"learning_rate": 6.886299718762556e-05, | |
"loss": 0.2972, | |
"step": 1750 | |
}, | |
{ | |
"epoch": 0.68, | |
"learning_rate": 6.866211329851346e-05, | |
"loss": 0.321, | |
"step": 1760 | |
}, | |
{ | |
"epoch": 0.68, | |
"learning_rate": 6.846122940940137e-05, | |
"loss": 0.3206, | |
"step": 1770 | |
}, | |
{ | |
"epoch": 0.69, | |
"learning_rate": 6.826034552028927e-05, | |
"loss": 0.304, | |
"step": 1780 | |
}, | |
{ | |
"epoch": 0.69, | |
"learning_rate": 6.805946163117718e-05, | |
"loss": 0.3101, | |
"step": 1790 | |
}, | |
{ | |
"epoch": 0.7, | |
"learning_rate": 6.785857774206509e-05, | |
"loss": 0.3263, | |
"step": 1800 | |
}, | |
{ | |
"epoch": 0.7, | |
"eval_loss": 0.3179059624671936, | |
"eval_runtime": 228.96, | |
"eval_samples_per_second": 2.184, | |
"eval_steps_per_second": 0.07, | |
"step": 1800 | |
}, | |
{ | |
"epoch": 0.7, | |
"learning_rate": 6.765769385295299e-05, | |
"loss": 0.3311, | |
"step": 1810 | |
}, | |
{ | |
"epoch": 0.7, | |
"learning_rate": 6.745680996384091e-05, | |
"loss": 0.3047, | |
"step": 1820 | |
}, | |
{ | |
"epoch": 0.71, | |
"learning_rate": 6.725592607472882e-05, | |
"loss": 0.3085, | |
"step": 1830 | |
}, | |
{ | |
"epoch": 0.71, | |
"learning_rate": 6.705504218561671e-05, | |
"loss": 0.3083, | |
"step": 1840 | |
}, | |
{ | |
"epoch": 0.71, | |
"learning_rate": 6.685415829650461e-05, | |
"loss": 0.3225, | |
"step": 1850 | |
}, | |
{ | |
"epoch": 0.72, | |
"learning_rate": 6.665327440739253e-05, | |
"loss": 0.3136, | |
"step": 1860 | |
}, | |
{ | |
"epoch": 0.72, | |
"learning_rate": 6.645239051828044e-05, | |
"loss": 0.3191, | |
"step": 1870 | |
}, | |
{ | |
"epoch": 0.73, | |
"learning_rate": 6.625150662916835e-05, | |
"loss": 0.3547, | |
"step": 1880 | |
}, | |
{ | |
"epoch": 0.73, | |
"learning_rate": 6.605062274005625e-05, | |
"loss": 0.3466, | |
"step": 1890 | |
}, | |
{ | |
"epoch": 0.73, | |
"learning_rate": 6.584973885094416e-05, | |
"loss": 0.2985, | |
"step": 1900 | |
}, | |
{ | |
"epoch": 0.73, | |
"eval_loss": 0.324951171875, | |
"eval_runtime": 228.2301, | |
"eval_samples_per_second": 2.191, | |
"eval_steps_per_second": 0.07, | |
"step": 1900 | |
}, | |
{ | |
"epoch": 0.74, | |
"learning_rate": 6.564885496183206e-05, | |
"loss": 0.3164, | |
"step": 1910 | |
}, | |
{ | |
"epoch": 0.74, | |
"learning_rate": 6.544797107271997e-05, | |
"loss": 0.3171, | |
"step": 1920 | |
}, | |
{ | |
"epoch": 0.75, | |
"learning_rate": 6.524708718360787e-05, | |
"loss": 0.2916, | |
"step": 1930 | |
}, | |
{ | |
"epoch": 0.75, | |
"learning_rate": 6.50462032944958e-05, | |
"loss": 0.3082, | |
"step": 1940 | |
}, | |
{ | |
"epoch": 0.75, | |
"learning_rate": 6.484531940538369e-05, | |
"loss": 0.308, | |
"step": 1950 | |
}, | |
{ | |
"epoch": 0.76, | |
"learning_rate": 6.464443551627159e-05, | |
"loss": 0.3085, | |
"step": 1960 | |
}, | |
{ | |
"epoch": 0.76, | |
"learning_rate": 6.44435516271595e-05, | |
"loss": 0.3062, | |
"step": 1970 | |
}, | |
{ | |
"epoch": 0.76, | |
"learning_rate": 6.424266773804742e-05, | |
"loss": 0.3214, | |
"step": 1980 | |
}, | |
{ | |
"epoch": 0.77, | |
"learning_rate": 6.404178384893532e-05, | |
"loss": 0.3046, | |
"step": 1990 | |
}, | |
{ | |
"epoch": 0.77, | |
"learning_rate": 6.384089995982323e-05, | |
"loss": 0.3161, | |
"step": 2000 | |
}, | |
{ | |
"epoch": 0.77, | |
"eval_loss": 0.3216029703617096, | |
"eval_runtime": 230.2587, | |
"eval_samples_per_second": 2.171, | |
"eval_steps_per_second": 0.069, | |
"step": 2000 | |
}, | |
{ | |
"epoch": 0.78, | |
"learning_rate": 6.364001607071113e-05, | |
"loss": 0.3274, | |
"step": 2010 | |
}, | |
{ | |
"epoch": 0.78, | |
"learning_rate": 6.343913218159904e-05, | |
"loss": 0.341, | |
"step": 2020 | |
}, | |
{ | |
"epoch": 0.78, | |
"learning_rate": 6.323824829248695e-05, | |
"loss": 0.3226, | |
"step": 2030 | |
}, | |
{ | |
"epoch": 0.79, | |
"learning_rate": 6.303736440337485e-05, | |
"loss": 0.3187, | |
"step": 2040 | |
}, | |
{ | |
"epoch": 0.79, | |
"learning_rate": 6.283648051426276e-05, | |
"loss": 0.3003, | |
"step": 2050 | |
}, | |
{ | |
"epoch": 0.8, | |
"learning_rate": 6.263559662515066e-05, | |
"loss": 0.2849, | |
"step": 2060 | |
}, | |
{ | |
"epoch": 0.8, | |
"learning_rate": 6.243471273603857e-05, | |
"loss": 0.3096, | |
"step": 2070 | |
}, | |
{ | |
"epoch": 0.8, | |
"learning_rate": 6.223382884692648e-05, | |
"loss": 0.2993, | |
"step": 2080 | |
}, | |
{ | |
"epoch": 0.81, | |
"learning_rate": 6.203294495781438e-05, | |
"loss": 0.2984, | |
"step": 2090 | |
}, | |
{ | |
"epoch": 0.81, | |
"learning_rate": 6.18320610687023e-05, | |
"loss": 0.2987, | |
"step": 2100 | |
}, | |
{ | |
"epoch": 0.81, | |
"eval_loss": 0.3201729953289032, | |
"eval_runtime": 227.0048, | |
"eval_samples_per_second": 2.203, | |
"eval_steps_per_second": 0.07, | |
"step": 2100 | |
}, | |
{ | |
"epoch": 0.81, | |
"learning_rate": 6.16311771795902e-05, | |
"loss": 0.3092, | |
"step": 2110 | |
}, | |
{ | |
"epoch": 0.82, | |
"learning_rate": 6.14302932904781e-05, | |
"loss": 0.2995, | |
"step": 2120 | |
}, | |
{ | |
"epoch": 0.82, | |
"learning_rate": 6.1229409401366e-05, | |
"loss": 0.3114, | |
"step": 2130 | |
}, | |
{ | |
"epoch": 0.83, | |
"learning_rate": 6.102852551225392e-05, | |
"loss": 0.3077, | |
"step": 2140 | |
}, | |
{ | |
"epoch": 0.83, | |
"learning_rate": 6.082764162314183e-05, | |
"loss": 0.3121, | |
"step": 2150 | |
}, | |
{ | |
"epoch": 0.83, | |
"learning_rate": 6.0626757734029736e-05, | |
"loss": 0.3042, | |
"step": 2160 | |
}, | |
{ | |
"epoch": 0.84, | |
"learning_rate": 6.0425873844917635e-05, | |
"loss": 0.3129, | |
"step": 2170 | |
}, | |
{ | |
"epoch": 0.84, | |
"learning_rate": 6.022498995580555e-05, | |
"loss": 0.2843, | |
"step": 2180 | |
}, | |
{ | |
"epoch": 0.85, | |
"learning_rate": 6.002410606669345e-05, | |
"loss": 0.2973, | |
"step": 2190 | |
}, | |
{ | |
"epoch": 0.85, | |
"learning_rate": 5.982322217758136e-05, | |
"loss": 0.3405, | |
"step": 2200 | |
}, | |
{ | |
"epoch": 0.85, | |
"eval_loss": 0.3213588297367096, | |
"eval_runtime": 231.1731, | |
"eval_samples_per_second": 2.163, | |
"eval_steps_per_second": 0.069, | |
"step": 2200 | |
}, | |
{ | |
"epoch": 0.85, | |
"learning_rate": 5.962233828846927e-05, | |
"loss": 0.2974, | |
"step": 2210 | |
}, | |
{ | |
"epoch": 0.86, | |
"learning_rate": 5.942145439935718e-05, | |
"loss": 0.2978, | |
"step": 2220 | |
}, | |
{ | |
"epoch": 0.86, | |
"learning_rate": 5.9220570510245076e-05, | |
"loss": 0.2922, | |
"step": 2230 | |
}, | |
{ | |
"epoch": 0.87, | |
"learning_rate": 5.901968662113299e-05, | |
"loss": 0.321, | |
"step": 2240 | |
}, | |
{ | |
"epoch": 0.87, | |
"learning_rate": 5.8818802732020895e-05, | |
"loss": 0.2926, | |
"step": 2250 | |
}, | |
{ | |
"epoch": 0.87, | |
"learning_rate": 5.86179188429088e-05, | |
"loss": 0.3175, | |
"step": 2260 | |
}, | |
{ | |
"epoch": 0.88, | |
"learning_rate": 5.841703495379671e-05, | |
"loss": 0.2816, | |
"step": 2270 | |
}, | |
{ | |
"epoch": 0.88, | |
"learning_rate": 5.821615106468461e-05, | |
"loss": 0.2761, | |
"step": 2280 | |
}, | |
{ | |
"epoch": 0.88, | |
"learning_rate": 5.801526717557252e-05, | |
"loss": 0.2786, | |
"step": 2290 | |
}, | |
{ | |
"epoch": 0.89, | |
"learning_rate": 5.781438328646043e-05, | |
"loss": 0.295, | |
"step": 2300 | |
}, | |
{ | |
"epoch": 0.89, | |
"eval_loss": 0.3176967203617096, | |
"eval_runtime": 229.1487, | |
"eval_samples_per_second": 2.182, | |
"eval_steps_per_second": 0.07, | |
"step": 2300 | |
}, | |
{ | |
"epoch": 0.89, | |
"learning_rate": 5.7613499397348337e-05, | |
"loss": 0.3299, | |
"step": 2310 | |
}, | |
{ | |
"epoch": 0.9, | |
"learning_rate": 5.741261550823624e-05, | |
"loss": 0.3122, | |
"step": 2320 | |
}, | |
{ | |
"epoch": 0.9, | |
"learning_rate": 5.7211731619124155e-05, | |
"loss": 0.3288, | |
"step": 2330 | |
}, | |
{ | |
"epoch": 0.9, | |
"learning_rate": 5.7010847730012054e-05, | |
"loss": 0.34, | |
"step": 2340 | |
}, | |
{ | |
"epoch": 0.91, | |
"learning_rate": 5.680996384089996e-05, | |
"loss": 0.3161, | |
"step": 2350 | |
}, | |
{ | |
"epoch": 0.91, | |
"learning_rate": 5.6609079951787866e-05, | |
"loss": 0.2696, | |
"step": 2360 | |
}, | |
{ | |
"epoch": 0.92, | |
"learning_rate": 5.640819606267578e-05, | |
"loss": 0.3285, | |
"step": 2370 | |
}, | |
{ | |
"epoch": 0.92, | |
"learning_rate": 5.6207312173563684e-05, | |
"loss": 0.3158, | |
"step": 2380 | |
}, | |
{ | |
"epoch": 0.92, | |
"learning_rate": 5.600642828445158e-05, | |
"loss": 0.3229, | |
"step": 2390 | |
}, | |
{ | |
"epoch": 0.93, | |
"learning_rate": 5.5805544395339496e-05, | |
"loss": 0.3168, | |
"step": 2400 | |
}, | |
{ | |
"epoch": 0.93, | |
"eval_loss": 0.3182547390460968, | |
"eval_runtime": 234.4281, | |
"eval_samples_per_second": 2.133, | |
"eval_steps_per_second": 0.068, | |
"step": 2400 | |
}, | |
{ | |
"epoch": 0.93, | |
"learning_rate": 5.56046605062274e-05, | |
"loss": 0.3167, | |
"step": 2410 | |
}, | |
{ | |
"epoch": 0.93, | |
"learning_rate": 5.540377661711531e-05, | |
"loss": 0.3128, | |
"step": 2420 | |
}, | |
{ | |
"epoch": 0.94, | |
"learning_rate": 5.520289272800322e-05, | |
"loss": 0.293, | |
"step": 2430 | |
}, | |
{ | |
"epoch": 0.94, | |
"learning_rate": 5.5002008838891126e-05, | |
"loss": 0.3113, | |
"step": 2440 | |
}, | |
{ | |
"epoch": 0.95, | |
"learning_rate": 5.4801124949779025e-05, | |
"loss": 0.3167, | |
"step": 2450 | |
}, | |
{ | |
"epoch": 0.95, | |
"learning_rate": 5.460024106066694e-05, | |
"loss": 0.3119, | |
"step": 2460 | |
}, | |
{ | |
"epoch": 0.95, | |
"learning_rate": 5.439935717155484e-05, | |
"loss": 0.295, | |
"step": 2470 | |
}, | |
{ | |
"epoch": 0.96, | |
"learning_rate": 5.419847328244275e-05, | |
"loss": 0.303, | |
"step": 2480 | |
}, | |
{ | |
"epoch": 0.96, | |
"learning_rate": 5.399758939333066e-05, | |
"loss": 0.3183, | |
"step": 2490 | |
}, | |
{ | |
"epoch": 0.97, | |
"learning_rate": 5.379670550421857e-05, | |
"loss": 0.3159, | |
"step": 2500 | |
}, | |
{ | |
"epoch": 0.97, | |
"eval_loss": 0.3175223171710968, | |
"eval_runtime": 242.5362, | |
"eval_samples_per_second": 2.062, | |
"eval_steps_per_second": 0.066, | |
"step": 2500 | |
}, | |
{ | |
"epoch": 0.97, | |
"learning_rate": 5.3595821615106466e-05, | |
"loss": 0.3086, | |
"step": 2510 | |
}, | |
{ | |
"epoch": 0.97, | |
"learning_rate": 5.339493772599438e-05, | |
"loss": 0.3184, | |
"step": 2520 | |
}, | |
{ | |
"epoch": 0.98, | |
"learning_rate": 5.3194053836882285e-05, | |
"loss": 0.3131, | |
"step": 2530 | |
}, | |
{ | |
"epoch": 0.98, | |
"learning_rate": 5.299316994777019e-05, | |
"loss": 0.3099, | |
"step": 2540 | |
}, | |
{ | |
"epoch": 0.98, | |
"learning_rate": 5.27922860586581e-05, | |
"loss": 0.3392, | |
"step": 2550 | |
}, | |
{ | |
"epoch": 0.99, | |
"learning_rate": 5.2591402169546e-05, | |
"loss": 0.3207, | |
"step": 2560 | |
}, | |
{ | |
"epoch": 0.99, | |
"learning_rate": 5.239051828043391e-05, | |
"loss": 0.307, | |
"step": 2570 | |
}, | |
{ | |
"epoch": 1.0, | |
"learning_rate": 5.218963439132182e-05, | |
"loss": 0.2946, | |
"step": 2580 | |
}, | |
{ | |
"epoch": 1.0, | |
"learning_rate": 5.198875050220973e-05, | |
"loss": 0.3138, | |
"step": 2590 | |
}, | |
{ | |
"epoch": 1.0, | |
"learning_rate": 5.178786661309763e-05, | |
"loss": 0.2704, | |
"step": 2600 | |
}, | |
{ | |
"epoch": 1.0, | |
"eval_loss": 0.31640625, | |
"eval_runtime": 234.6863, | |
"eval_samples_per_second": 2.131, | |
"eval_steps_per_second": 0.068, | |
"step": 2600 | |
}, | |
{ | |
"epoch": 1.01, | |
"learning_rate": 5.1586982723985545e-05, | |
"loss": 0.2552, | |
"step": 2610 | |
}, | |
{ | |
"epoch": 1.01, | |
"learning_rate": 5.1386098834873444e-05, | |
"loss": 0.2701, | |
"step": 2620 | |
}, | |
{ | |
"epoch": 1.02, | |
"learning_rate": 5.118521494576135e-05, | |
"loss": 0.2582, | |
"step": 2630 | |
}, | |
{ | |
"epoch": 1.02, | |
"learning_rate": 5.098433105664926e-05, | |
"loss": 0.2592, | |
"step": 2640 | |
}, | |
{ | |
"epoch": 1.02, | |
"learning_rate": 5.078344716753717e-05, | |
"loss": 0.2804, | |
"step": 2650 | |
}, | |
{ | |
"epoch": 1.03, | |
"learning_rate": 5.0582563278425074e-05, | |
"loss": 0.2553, | |
"step": 2660 | |
}, | |
{ | |
"epoch": 1.03, | |
"learning_rate": 5.038167938931297e-05, | |
"loss": 0.2541, | |
"step": 2670 | |
}, | |
{ | |
"epoch": 1.04, | |
"learning_rate": 5.0180795500200886e-05, | |
"loss": 0.2536, | |
"step": 2680 | |
}, | |
{ | |
"epoch": 1.04, | |
"learning_rate": 4.997991161108879e-05, | |
"loss": 0.2612, | |
"step": 2690 | |
}, | |
{ | |
"epoch": 1.04, | |
"learning_rate": 4.9779027721976704e-05, | |
"loss": 0.278, | |
"step": 2700 | |
}, | |
{ | |
"epoch": 1.04, | |
"eval_loss": 0.3170689046382904, | |
"eval_runtime": 234.3707, | |
"eval_samples_per_second": 2.133, | |
"eval_steps_per_second": 0.068, | |
"step": 2700 | |
}, | |
{ | |
"epoch": 1.05, | |
"learning_rate": 4.95781438328646e-05, | |
"loss": 0.2573, | |
"step": 2710 | |
}, | |
{ | |
"epoch": 1.05, | |
"learning_rate": 4.9377259943752516e-05, | |
"loss": 0.2553, | |
"step": 2720 | |
}, | |
{ | |
"epoch": 1.05, | |
"learning_rate": 4.917637605464042e-05, | |
"loss": 0.2675, | |
"step": 2730 | |
}, | |
{ | |
"epoch": 1.06, | |
"learning_rate": 4.897549216552833e-05, | |
"loss": 0.2842, | |
"step": 2740 | |
}, | |
{ | |
"epoch": 1.06, | |
"learning_rate": 4.877460827641623e-05, | |
"loss": 0.2523, | |
"step": 2750 | |
}, | |
{ | |
"epoch": 1.07, | |
"learning_rate": 4.8573724387304146e-05, | |
"loss": 0.2698, | |
"step": 2760 | |
}, | |
{ | |
"epoch": 1.07, | |
"learning_rate": 4.8372840498192045e-05, | |
"loss": 0.2911, | |
"step": 2770 | |
}, | |
{ | |
"epoch": 1.07, | |
"learning_rate": 4.817195660907996e-05, | |
"loss": 0.2909, | |
"step": 2780 | |
}, | |
{ | |
"epoch": 1.08, | |
"learning_rate": 4.7971072719967863e-05, | |
"loss": 0.2753, | |
"step": 2790 | |
}, | |
{ | |
"epoch": 1.08, | |
"learning_rate": 4.777018883085577e-05, | |
"loss": 0.2703, | |
"step": 2800 | |
}, | |
{ | |
"epoch": 1.08, | |
"eval_loss": 0.312744140625, | |
"eval_runtime": 232.5523, | |
"eval_samples_per_second": 2.15, | |
"eval_steps_per_second": 0.069, | |
"step": 2800 | |
}, | |
{ | |
"epoch": 1.09, | |
"learning_rate": 4.7569304941743675e-05, | |
"loss": 0.2798, | |
"step": 2810 | |
}, | |
{ | |
"epoch": 1.09, | |
"learning_rate": 4.736842105263158e-05, | |
"loss": 0.2738, | |
"step": 2820 | |
}, | |
{ | |
"epoch": 1.09, | |
"learning_rate": 4.716753716351949e-05, | |
"loss": 0.2757, | |
"step": 2830 | |
}, | |
{ | |
"epoch": 1.1, | |
"learning_rate": 4.69666532744074e-05, | |
"loss": 0.2441, | |
"step": 2840 | |
}, | |
{ | |
"epoch": 1.1, | |
"learning_rate": 4.67657693852953e-05, | |
"loss": 0.2597, | |
"step": 2850 | |
}, | |
{ | |
"epoch": 1.1, | |
"learning_rate": 4.656488549618321e-05, | |
"loss": 0.2546, | |
"step": 2860 | |
}, | |
{ | |
"epoch": 1.11, | |
"learning_rate": 4.636400160707112e-05, | |
"loss": 0.2567, | |
"step": 2870 | |
}, | |
{ | |
"epoch": 1.11, | |
"learning_rate": 4.616311771795902e-05, | |
"loss": 0.2677, | |
"step": 2880 | |
}, | |
{ | |
"epoch": 1.12, | |
"learning_rate": 4.596223382884693e-05, | |
"loss": 0.2544, | |
"step": 2890 | |
}, | |
{ | |
"epoch": 1.12, | |
"learning_rate": 4.576134993973484e-05, | |
"loss": 0.2596, | |
"step": 2900 | |
}, | |
{ | |
"epoch": 1.12, | |
"eval_loss": 0.312744140625, | |
"eval_runtime": 234.4348, | |
"eval_samples_per_second": 2.133, | |
"eval_steps_per_second": 0.068, | |
"step": 2900 | |
}, | |
{ | |
"epoch": 1.12, | |
"learning_rate": 4.556046605062274e-05, | |
"loss": 0.2678, | |
"step": 2910 | |
}, | |
{ | |
"epoch": 1.13, | |
"learning_rate": 4.535958216151065e-05, | |
"loss": 0.2741, | |
"step": 2920 | |
}, | |
{ | |
"epoch": 1.13, | |
"learning_rate": 4.515869827239856e-05, | |
"loss": 0.2664, | |
"step": 2930 | |
}, | |
{ | |
"epoch": 1.14, | |
"learning_rate": 4.4957814383286464e-05, | |
"loss": 0.2743, | |
"step": 2940 | |
}, | |
{ | |
"epoch": 1.14, | |
"learning_rate": 4.475693049417437e-05, | |
"loss": 0.2587, | |
"step": 2950 | |
}, | |
{ | |
"epoch": 1.14, | |
"learning_rate": 4.4556046605062276e-05, | |
"loss": 0.2744, | |
"step": 2960 | |
}, | |
{ | |
"epoch": 1.15, | |
"learning_rate": 4.435516271595018e-05, | |
"loss": 0.2637, | |
"step": 2970 | |
}, | |
{ | |
"epoch": 1.15, | |
"learning_rate": 4.4154278826838094e-05, | |
"loss": 0.2604, | |
"step": 2980 | |
}, | |
{ | |
"epoch": 1.15, | |
"learning_rate": 4.395339493772599e-05, | |
"loss": 0.2609, | |
"step": 2990 | |
}, | |
{ | |
"epoch": 1.16, | |
"learning_rate": 4.3752511048613906e-05, | |
"loss": 0.2564, | |
"step": 3000 | |
}, | |
{ | |
"epoch": 1.16, | |
"eval_loss": 0.317138671875, | |
"eval_runtime": 236.9531, | |
"eval_samples_per_second": 2.11, | |
"eval_steps_per_second": 0.068, | |
"step": 3000 | |
}, | |
{ | |
"epoch": 1.16, | |
"learning_rate": 4.355162715950181e-05, | |
"loss": 0.2585, | |
"step": 3010 | |
}, | |
{ | |
"epoch": 1.17, | |
"learning_rate": 4.335074327038972e-05, | |
"loss": 0.272, | |
"step": 3020 | |
}, | |
{ | |
"epoch": 1.17, | |
"learning_rate": 4.3149859381277623e-05, | |
"loss": 0.2593, | |
"step": 3030 | |
}, | |
{ | |
"epoch": 1.17, | |
"learning_rate": 4.2948975492165536e-05, | |
"loss": 0.2658, | |
"step": 3040 | |
}, | |
{ | |
"epoch": 1.18, | |
"learning_rate": 4.2748091603053435e-05, | |
"loss": 0.2643, | |
"step": 3050 | |
}, | |
{ | |
"epoch": 1.18, | |
"learning_rate": 4.254720771394135e-05, | |
"loss": 0.2631, | |
"step": 3060 | |
}, | |
{ | |
"epoch": 1.19, | |
"learning_rate": 4.234632382482925e-05, | |
"loss": 0.2376, | |
"step": 3070 | |
}, | |
{ | |
"epoch": 1.19, | |
"learning_rate": 4.214543993571716e-05, | |
"loss": 0.2645, | |
"step": 3080 | |
}, | |
{ | |
"epoch": 1.19, | |
"learning_rate": 4.1944556046605065e-05, | |
"loss": 0.2903, | |
"step": 3090 | |
}, | |
{ | |
"epoch": 1.2, | |
"learning_rate": 4.174367215749297e-05, | |
"loss": 0.2502, | |
"step": 3100 | |
}, | |
{ | |
"epoch": 1.2, | |
"eval_loss": 0.3148367702960968, | |
"eval_runtime": 232.416, | |
"eval_samples_per_second": 2.151, | |
"eval_steps_per_second": 0.069, | |
"step": 3100 | |
}, | |
{ | |
"epoch": 1.2, | |
"learning_rate": 4.154278826838088e-05, | |
"loss": 0.2389, | |
"step": 3110 | |
}, | |
{ | |
"epoch": 1.21, | |
"learning_rate": 4.134190437926879e-05, | |
"loss": 0.2575, | |
"step": 3120 | |
}, | |
{ | |
"epoch": 1.21, | |
"learning_rate": 4.114102049015669e-05, | |
"loss": 0.2825, | |
"step": 3130 | |
}, | |
{ | |
"epoch": 1.21, | |
"learning_rate": 4.09401366010446e-05, | |
"loss": 0.2574, | |
"step": 3140 | |
}, | |
{ | |
"epoch": 1.22, | |
"learning_rate": 4.073925271193251e-05, | |
"loss": 0.2707, | |
"step": 3150 | |
}, | |
{ | |
"epoch": 1.22, | |
"learning_rate": 4.053836882282041e-05, | |
"loss": 0.2589, | |
"step": 3160 | |
}, | |
{ | |
"epoch": 1.22, | |
"learning_rate": 4.033748493370832e-05, | |
"loss": 0.2646, | |
"step": 3170 | |
}, | |
{ | |
"epoch": 1.23, | |
"learning_rate": 4.0136601044596224e-05, | |
"loss": 0.2685, | |
"step": 3180 | |
}, | |
{ | |
"epoch": 1.23, | |
"learning_rate": 3.993571715548413e-05, | |
"loss": 0.2595, | |
"step": 3190 | |
}, | |
{ | |
"epoch": 1.24, | |
"learning_rate": 3.973483326637204e-05, | |
"loss": 0.2477, | |
"step": 3200 | |
}, | |
{ | |
"epoch": 1.24, | |
"eval_loss": 0.3164760172367096, | |
"eval_runtime": 229.5059, | |
"eval_samples_per_second": 2.179, | |
"eval_steps_per_second": 0.07, | |
"step": 3200 | |
}, | |
{ | |
"epoch": 1.24, | |
"learning_rate": 3.953394937725994e-05, | |
"loss": 0.2598, | |
"step": 3210 | |
}, | |
{ | |
"epoch": 1.24, | |
"learning_rate": 3.9333065488147854e-05, | |
"loss": 0.2626, | |
"step": 3220 | |
}, | |
{ | |
"epoch": 1.25, | |
"learning_rate": 3.913218159903576e-05, | |
"loss": 0.2538, | |
"step": 3230 | |
}, | |
{ | |
"epoch": 1.25, | |
"learning_rate": 3.8931297709923666e-05, | |
"loss": 0.2562, | |
"step": 3240 | |
}, | |
{ | |
"epoch": 1.26, | |
"learning_rate": 3.873041382081157e-05, | |
"loss": 0.2625, | |
"step": 3250 | |
}, | |
{ | |
"epoch": 1.26, | |
"learning_rate": 3.8529529931699484e-05, | |
"loss": 0.2638, | |
"step": 3260 | |
}, | |
{ | |
"epoch": 1.26, | |
"learning_rate": 3.8328646042587384e-05, | |
"loss": 0.2415, | |
"step": 3270 | |
}, | |
{ | |
"epoch": 1.27, | |
"learning_rate": 3.8127762153475296e-05, | |
"loss": 0.2894, | |
"step": 3280 | |
}, | |
{ | |
"epoch": 1.27, | |
"learning_rate": 3.79268782643632e-05, | |
"loss": 0.2606, | |
"step": 3290 | |
}, | |
{ | |
"epoch": 1.27, | |
"learning_rate": 3.772599437525111e-05, | |
"loss": 0.2607, | |
"step": 3300 | |
}, | |
{ | |
"epoch": 1.27, | |
"eval_loss": 0.3150809109210968, | |
"eval_runtime": 231.3186, | |
"eval_samples_per_second": 2.162, | |
"eval_steps_per_second": 0.069, | |
"step": 3300 | |
}, | |
{ | |
"epoch": 1.28, | |
"learning_rate": 3.7525110486139014e-05, | |
"loss": 0.2853, | |
"step": 3310 | |
}, | |
{ | |
"epoch": 1.28, | |
"learning_rate": 3.732422659702692e-05, | |
"loss": 0.2597, | |
"step": 3320 | |
}, | |
{ | |
"epoch": 1.29, | |
"learning_rate": 3.7123342707914825e-05, | |
"loss": 0.2653, | |
"step": 3330 | |
}, | |
{ | |
"epoch": 1.29, | |
"learning_rate": 3.692245881880274e-05, | |
"loss": 0.2626, | |
"step": 3340 | |
}, | |
{ | |
"epoch": 1.29, | |
"learning_rate": 3.672157492969064e-05, | |
"loss": 0.2465, | |
"step": 3350 | |
}, | |
{ | |
"epoch": 1.3, | |
"learning_rate": 3.652069104057855e-05, | |
"loss": 0.2997, | |
"step": 3360 | |
}, | |
{ | |
"epoch": 1.3, | |
"learning_rate": 3.6319807151466455e-05, | |
"loss": 0.2614, | |
"step": 3370 | |
}, | |
{ | |
"epoch": 1.31, | |
"learning_rate": 3.611892326235436e-05, | |
"loss": 0.2708, | |
"step": 3380 | |
}, | |
{ | |
"epoch": 1.31, | |
"learning_rate": 3.591803937324227e-05, | |
"loss": 0.2633, | |
"step": 3390 | |
}, | |
{ | |
"epoch": 1.31, | |
"learning_rate": 3.571715548413018e-05, | |
"loss": 0.2607, | |
"step": 3400 | |
}, | |
{ | |
"epoch": 1.31, | |
"eval_loss": 0.309326171875, | |
"eval_runtime": 234.7514, | |
"eval_samples_per_second": 2.13, | |
"eval_steps_per_second": 0.068, | |
"step": 3400 | |
}, | |
{ | |
"epoch": 1.32, | |
"learning_rate": 3.551627159501808e-05, | |
"loss": 0.2605, | |
"step": 3410 | |
}, | |
{ | |
"epoch": 1.32, | |
"learning_rate": 3.531538770590599e-05, | |
"loss": 0.2557, | |
"step": 3420 | |
}, | |
{ | |
"epoch": 1.32, | |
"learning_rate": 3.511450381679389e-05, | |
"loss": 0.2566, | |
"step": 3430 | |
}, | |
{ | |
"epoch": 1.33, | |
"learning_rate": 3.49136199276818e-05, | |
"loss": 0.2502, | |
"step": 3440 | |
}, | |
{ | |
"epoch": 1.33, | |
"learning_rate": 3.471273603856971e-05, | |
"loss": 0.2512, | |
"step": 3450 | |
}, | |
{ | |
"epoch": 1.34, | |
"learning_rate": 3.4511852149457614e-05, | |
"loss": 0.2966, | |
"step": 3460 | |
}, | |
{ | |
"epoch": 1.34, | |
"learning_rate": 3.431096826034552e-05, | |
"loss": 0.2408, | |
"step": 3470 | |
}, | |
{ | |
"epoch": 1.34, | |
"learning_rate": 3.411008437123343e-05, | |
"loss": 0.2667, | |
"step": 3480 | |
}, | |
{ | |
"epoch": 1.35, | |
"learning_rate": 3.390920048212133e-05, | |
"loss": 0.2851, | |
"step": 3490 | |
}, | |
{ | |
"epoch": 1.35, | |
"learning_rate": 3.3708316593009245e-05, | |
"loss": 0.2488, | |
"step": 3500 | |
}, | |
{ | |
"epoch": 1.35, | |
"eval_loss": 0.3107561469078064, | |
"eval_runtime": 232.6614, | |
"eval_samples_per_second": 2.149, | |
"eval_steps_per_second": 0.069, | |
"step": 3500 | |
}, | |
{ | |
"epoch": 1.36, | |
"learning_rate": 3.350743270389715e-05, | |
"loss": 0.2942, | |
"step": 3510 | |
}, | |
{ | |
"epoch": 1.36, | |
"learning_rate": 3.3306548814785056e-05, | |
"loss": 0.2605, | |
"step": 3520 | |
}, | |
{ | |
"epoch": 1.36, | |
"learning_rate": 3.310566492567296e-05, | |
"loss": 0.2352, | |
"step": 3530 | |
}, | |
{ | |
"epoch": 1.37, | |
"learning_rate": 3.290478103656087e-05, | |
"loss": 0.2371, | |
"step": 3540 | |
}, | |
{ | |
"epoch": 1.37, | |
"learning_rate": 3.2703897147448774e-05, | |
"loss": 0.2597, | |
"step": 3550 | |
}, | |
{ | |
"epoch": 1.38, | |
"learning_rate": 3.2503013258336686e-05, | |
"loss": 0.2452, | |
"step": 3560 | |
}, | |
{ | |
"epoch": 1.38, | |
"learning_rate": 3.2302129369224585e-05, | |
"loss": 0.2617, | |
"step": 3570 | |
}, | |
{ | |
"epoch": 1.38, | |
"learning_rate": 3.21012454801125e-05, | |
"loss": 0.2664, | |
"step": 3580 | |
}, | |
{ | |
"epoch": 1.39, | |
"learning_rate": 3.1900361591000404e-05, | |
"loss": 0.28, | |
"step": 3590 | |
}, | |
{ | |
"epoch": 1.39, | |
"learning_rate": 3.169947770188831e-05, | |
"loss": 0.2376, | |
"step": 3600 | |
}, | |
{ | |
"epoch": 1.39, | |
"eval_loss": 0.3105817437171936, | |
"eval_runtime": 232.1998, | |
"eval_samples_per_second": 2.153, | |
"eval_steps_per_second": 0.069, | |
"step": 3600 | |
}, | |
{ | |
"epoch": 1.39, | |
"learning_rate": 3.1498593812776215e-05, | |
"loss": 0.2667, | |
"step": 3610 | |
}, | |
{ | |
"epoch": 1.4, | |
"learning_rate": 3.129770992366413e-05, | |
"loss": 0.2599, | |
"step": 3620 | |
}, | |
{ | |
"epoch": 1.4, | |
"learning_rate": 3.109682603455203e-05, | |
"loss": 0.2656, | |
"step": 3630 | |
}, | |
{ | |
"epoch": 1.41, | |
"learning_rate": 3.089594214543994e-05, | |
"loss": 0.2311, | |
"step": 3640 | |
}, | |
{ | |
"epoch": 1.41, | |
"learning_rate": 3.0695058256327845e-05, | |
"loss": 0.2506, | |
"step": 3650 | |
}, | |
{ | |
"epoch": 1.41, | |
"learning_rate": 3.049417436721575e-05, | |
"loss": 0.2598, | |
"step": 3660 | |
}, | |
{ | |
"epoch": 1.42, | |
"learning_rate": 3.0293290478103657e-05, | |
"loss": 0.2654, | |
"step": 3670 | |
}, | |
{ | |
"epoch": 1.42, | |
"learning_rate": 3.0092406588991563e-05, | |
"loss": 0.2454, | |
"step": 3680 | |
}, | |
{ | |
"epoch": 1.43, | |
"learning_rate": 2.9891522699879472e-05, | |
"loss": 0.2513, | |
"step": 3690 | |
}, | |
{ | |
"epoch": 1.43, | |
"learning_rate": 2.9690638810767378e-05, | |
"loss": 0.2642, | |
"step": 3700 | |
}, | |
{ | |
"epoch": 1.43, | |
"eval_loss": 0.3145228922367096, | |
"eval_runtime": 240.0973, | |
"eval_samples_per_second": 2.082, | |
"eval_steps_per_second": 0.067, | |
"step": 3700 | |
}, | |
{ | |
"epoch": 1.43, | |
"learning_rate": 2.9489754921655284e-05, | |
"loss": 0.2386, | |
"step": 3710 | |
}, | |
{ | |
"epoch": 1.44, | |
"learning_rate": 2.9288871032543193e-05, | |
"loss": 0.2449, | |
"step": 3720 | |
}, | |
{ | |
"epoch": 1.44, | |
"learning_rate": 2.90879871434311e-05, | |
"loss": 0.2581, | |
"step": 3730 | |
}, | |
{ | |
"epoch": 1.44, | |
"learning_rate": 2.8887103254319005e-05, | |
"loss": 0.25, | |
"step": 3740 | |
}, | |
{ | |
"epoch": 1.45, | |
"learning_rate": 2.8686219365206914e-05, | |
"loss": 0.2552, | |
"step": 3750 | |
}, | |
{ | |
"epoch": 1.45, | |
"learning_rate": 2.848533547609482e-05, | |
"loss": 0.2819, | |
"step": 3760 | |
}, | |
{ | |
"epoch": 1.46, | |
"learning_rate": 2.8284451586982725e-05, | |
"loss": 0.2491, | |
"step": 3770 | |
}, | |
{ | |
"epoch": 1.46, | |
"learning_rate": 2.8083567697870635e-05, | |
"loss": 0.2582, | |
"step": 3780 | |
}, | |
{ | |
"epoch": 1.46, | |
"learning_rate": 2.7882683808758537e-05, | |
"loss": 0.2479, | |
"step": 3790 | |
}, | |
{ | |
"epoch": 1.47, | |
"learning_rate": 2.7681799919646446e-05, | |
"loss": 0.2686, | |
"step": 3800 | |
}, | |
{ | |
"epoch": 1.47, | |
"eval_loss": 0.3105817437171936, | |
"eval_runtime": 229.2876, | |
"eval_samples_per_second": 2.181, | |
"eval_steps_per_second": 0.07, | |
"step": 3800 | |
}, | |
{ | |
"epoch": 1.47, | |
"learning_rate": 2.7480916030534355e-05, | |
"loss": 0.265, | |
"step": 3810 | |
}, | |
{ | |
"epoch": 1.48, | |
"learning_rate": 2.7280032141422258e-05, | |
"loss": 0.2557, | |
"step": 3820 | |
}, | |
{ | |
"epoch": 1.48, | |
"learning_rate": 2.7079148252310167e-05, | |
"loss": 0.2458, | |
"step": 3830 | |
}, | |
{ | |
"epoch": 1.48, | |
"learning_rate": 2.6878264363198073e-05, | |
"loss": 0.2683, | |
"step": 3840 | |
}, | |
{ | |
"epoch": 1.49, | |
"learning_rate": 2.667738047408598e-05, | |
"loss": 0.2494, | |
"step": 3850 | |
}, | |
{ | |
"epoch": 1.49, | |
"learning_rate": 2.6476496584973888e-05, | |
"loss": 0.2631, | |
"step": 3860 | |
}, | |
{ | |
"epoch": 1.49, | |
"learning_rate": 2.6275612695861794e-05, | |
"loss": 0.256, | |
"step": 3870 | |
}, | |
{ | |
"epoch": 1.5, | |
"learning_rate": 2.60747288067497e-05, | |
"loss": 0.2686, | |
"step": 3880 | |
}, | |
{ | |
"epoch": 1.5, | |
"learning_rate": 2.587384491763761e-05, | |
"loss": 0.2562, | |
"step": 3890 | |
}, | |
{ | |
"epoch": 1.51, | |
"learning_rate": 2.5672961028525515e-05, | |
"loss": 0.2474, | |
"step": 3900 | |
}, | |
{ | |
"epoch": 1.51, | |
"eval_loss": 0.3085588812828064, | |
"eval_runtime": 234.5548, | |
"eval_samples_per_second": 2.132, | |
"eval_steps_per_second": 0.068, | |
"step": 3900 | |
}, | |
{ | |
"epoch": 1.51, | |
"learning_rate": 2.547207713941342e-05, | |
"loss": 0.2615, | |
"step": 3910 | |
}, | |
{ | |
"epoch": 1.51, | |
"learning_rate": 2.527119325030133e-05, | |
"loss": 0.2588, | |
"step": 3920 | |
}, | |
{ | |
"epoch": 1.52, | |
"learning_rate": 2.5070309361189232e-05, | |
"loss": 0.2585, | |
"step": 3930 | |
}, | |
{ | |
"epoch": 1.52, | |
"learning_rate": 2.486942547207714e-05, | |
"loss": 0.2703, | |
"step": 3940 | |
}, | |
{ | |
"epoch": 1.53, | |
"learning_rate": 2.4668541582965047e-05, | |
"loss": 0.2341, | |
"step": 3950 | |
}, | |
{ | |
"epoch": 1.53, | |
"learning_rate": 2.4467657693852956e-05, | |
"loss": 0.2556, | |
"step": 3960 | |
}, | |
{ | |
"epoch": 1.53, | |
"learning_rate": 2.4266773804740862e-05, | |
"loss": 0.2621, | |
"step": 3970 | |
}, | |
{ | |
"epoch": 1.54, | |
"learning_rate": 2.4065889915628768e-05, | |
"loss": 0.2614, | |
"step": 3980 | |
}, | |
{ | |
"epoch": 1.54, | |
"learning_rate": 2.3865006026516677e-05, | |
"loss": 0.2531, | |
"step": 3990 | |
}, | |
{ | |
"epoch": 1.54, | |
"learning_rate": 2.3664122137404583e-05, | |
"loss": 0.265, | |
"step": 4000 | |
}, | |
{ | |
"epoch": 1.54, | |
"eval_loss": 0.3104073703289032, | |
"eval_runtime": 234.8987, | |
"eval_samples_per_second": 2.129, | |
"eval_steps_per_second": 0.068, | |
"step": 4000 | |
}, | |
{ | |
"epoch": 1.55, | |
"learning_rate": 2.346323824829249e-05, | |
"loss": 0.2684, | |
"step": 4010 | |
}, | |
{ | |
"epoch": 1.55, | |
"learning_rate": 2.3262354359180395e-05, | |
"loss": 0.2399, | |
"step": 4020 | |
}, | |
{ | |
"epoch": 1.56, | |
"learning_rate": 2.3061470470068304e-05, | |
"loss": 0.2632, | |
"step": 4030 | |
}, | |
{ | |
"epoch": 1.56, | |
"learning_rate": 2.286058658095621e-05, | |
"loss": 0.2659, | |
"step": 4040 | |
}, | |
{ | |
"epoch": 1.56, | |
"learning_rate": 2.2659702691844116e-05, | |
"loss": 0.2662, | |
"step": 4050 | |
}, | |
{ | |
"epoch": 1.57, | |
"learning_rate": 2.245881880273202e-05, | |
"loss": 0.24, | |
"step": 4060 | |
}, | |
{ | |
"epoch": 1.57, | |
"learning_rate": 2.225793491361993e-05, | |
"loss": 0.248, | |
"step": 4070 | |
}, | |
{ | |
"epoch": 1.58, | |
"learning_rate": 2.2057051024507836e-05, | |
"loss": 0.2675, | |
"step": 4080 | |
}, | |
{ | |
"epoch": 1.58, | |
"learning_rate": 2.1856167135395742e-05, | |
"loss": 0.263, | |
"step": 4090 | |
}, | |
{ | |
"epoch": 1.58, | |
"learning_rate": 2.165528324628365e-05, | |
"loss": 0.259, | |
"step": 4100 | |
}, | |
{ | |
"epoch": 1.58, | |
"eval_loss": 0.3070940375328064, | |
"eval_runtime": 232.4523, | |
"eval_samples_per_second": 2.151, | |
"eval_steps_per_second": 0.069, | |
"step": 4100 | |
}, | |
{ | |
"epoch": 1.59, | |
"learning_rate": 2.1454399357171557e-05, | |
"loss": 0.2708, | |
"step": 4110 | |
}, | |
{ | |
"epoch": 1.59, | |
"learning_rate": 2.1253515468059463e-05, | |
"loss": 0.2591, | |
"step": 4120 | |
}, | |
{ | |
"epoch": 1.6, | |
"learning_rate": 2.105263157894737e-05, | |
"loss": 0.2451, | |
"step": 4130 | |
}, | |
{ | |
"epoch": 1.6, | |
"learning_rate": 2.0851747689835278e-05, | |
"loss": 0.2636, | |
"step": 4140 | |
}, | |
{ | |
"epoch": 1.6, | |
"learning_rate": 2.0650863800723184e-05, | |
"loss": 0.2754, | |
"step": 4150 | |
}, | |
{ | |
"epoch": 1.61, | |
"learning_rate": 2.044997991161109e-05, | |
"loss": 0.2639, | |
"step": 4160 | |
}, | |
{ | |
"epoch": 1.61, | |
"learning_rate": 2.0249096022499e-05, | |
"loss": 0.258, | |
"step": 4170 | |
}, | |
{ | |
"epoch": 1.61, | |
"learning_rate": 2.0048212133386905e-05, | |
"loss": 0.2585, | |
"step": 4180 | |
}, | |
{ | |
"epoch": 1.62, | |
"learning_rate": 1.984732824427481e-05, | |
"loss": 0.2735, | |
"step": 4190 | |
}, | |
{ | |
"epoch": 1.62, | |
"learning_rate": 1.9646444355162716e-05, | |
"loss": 0.2529, | |
"step": 4200 | |
}, | |
{ | |
"epoch": 1.62, | |
"eval_loss": 0.3050014078617096, | |
"eval_runtime": 231.7346, | |
"eval_samples_per_second": 2.158, | |
"eval_steps_per_second": 0.069, | |
"step": 4200 | |
}, | |
{ | |
"epoch": 1.63, | |
"learning_rate": 1.9445560466050626e-05, | |
"loss": 0.2392, | |
"step": 4210 | |
}, | |
{ | |
"epoch": 1.63, | |
"learning_rate": 1.924467657693853e-05, | |
"loss": 0.2495, | |
"step": 4220 | |
}, | |
{ | |
"epoch": 1.63, | |
"learning_rate": 1.9043792687826437e-05, | |
"loss": 0.2435, | |
"step": 4230 | |
}, | |
{ | |
"epoch": 1.64, | |
"learning_rate": 1.8842908798714343e-05, | |
"loss": 0.2352, | |
"step": 4240 | |
}, | |
{ | |
"epoch": 1.64, | |
"learning_rate": 1.8642024909602252e-05, | |
"loss": 0.2506, | |
"step": 4250 | |
}, | |
{ | |
"epoch": 1.65, | |
"learning_rate": 1.8441141020490158e-05, | |
"loss": 0.2384, | |
"step": 4260 | |
}, | |
{ | |
"epoch": 1.65, | |
"learning_rate": 1.8240257131378064e-05, | |
"loss": 0.256, | |
"step": 4270 | |
}, | |
{ | |
"epoch": 1.65, | |
"learning_rate": 1.8039373242265973e-05, | |
"loss": 0.255, | |
"step": 4280 | |
}, | |
{ | |
"epoch": 1.66, | |
"learning_rate": 1.783848935315388e-05, | |
"loss": 0.2645, | |
"step": 4290 | |
}, | |
{ | |
"epoch": 1.66, | |
"learning_rate": 1.7637605464041785e-05, | |
"loss": 0.2624, | |
"step": 4300 | |
}, | |
{ | |
"epoch": 1.66, | |
"eval_loss": 0.308837890625, | |
"eval_runtime": 236.1172, | |
"eval_samples_per_second": 2.118, | |
"eval_steps_per_second": 0.068, | |
"step": 4300 | |
}, | |
{ | |
"epoch": 1.66, | |
"learning_rate": 1.743672157492969e-05, | |
"loss": 0.2557, | |
"step": 4310 | |
}, | |
{ | |
"epoch": 1.67, | |
"learning_rate": 1.72358376858176e-05, | |
"loss": 0.2391, | |
"step": 4320 | |
}, | |
{ | |
"epoch": 1.67, | |
"learning_rate": 1.7034953796705506e-05, | |
"loss": 0.2361, | |
"step": 4330 | |
}, | |
{ | |
"epoch": 1.68, | |
"learning_rate": 1.683406990759341e-05, | |
"loss": 0.2389, | |
"step": 4340 | |
}, | |
{ | |
"epoch": 1.68, | |
"learning_rate": 1.663318601848132e-05, | |
"loss": 0.2491, | |
"step": 4350 | |
}, | |
{ | |
"epoch": 1.68, | |
"learning_rate": 1.6432302129369227e-05, | |
"loss": 0.2556, | |
"step": 4360 | |
}, | |
{ | |
"epoch": 1.69, | |
"learning_rate": 1.6231418240257132e-05, | |
"loss": 0.2963, | |
"step": 4370 | |
}, | |
{ | |
"epoch": 1.69, | |
"learning_rate": 1.6030534351145038e-05, | |
"loss": 0.2571, | |
"step": 4380 | |
}, | |
{ | |
"epoch": 1.7, | |
"learning_rate": 1.5829650462032947e-05, | |
"loss": 0.2707, | |
"step": 4390 | |
}, | |
{ | |
"epoch": 1.7, | |
"learning_rate": 1.5628766572920853e-05, | |
"loss": 0.2513, | |
"step": 4400 | |
}, | |
{ | |
"epoch": 1.7, | |
"eval_loss": 0.3080357015132904, | |
"eval_runtime": 231.5394, | |
"eval_samples_per_second": 2.159, | |
"eval_steps_per_second": 0.069, | |
"step": 4400 | |
}, | |
{ | |
"epoch": 1.7, | |
"learning_rate": 1.542788268380876e-05, | |
"loss": 0.2542, | |
"step": 4410 | |
}, | |
{ | |
"epoch": 1.71, | |
"learning_rate": 1.5226998794696665e-05, | |
"loss": 0.2576, | |
"step": 4420 | |
}, | |
{ | |
"epoch": 1.71, | |
"learning_rate": 1.5026114905584574e-05, | |
"loss": 0.2314, | |
"step": 4430 | |
}, | |
{ | |
"epoch": 1.71, | |
"learning_rate": 1.482523101647248e-05, | |
"loss": 0.242, | |
"step": 4440 | |
}, | |
{ | |
"epoch": 1.72, | |
"learning_rate": 1.4624347127360386e-05, | |
"loss": 0.2599, | |
"step": 4450 | |
}, | |
{ | |
"epoch": 1.72, | |
"learning_rate": 1.4423463238248295e-05, | |
"loss": 0.267, | |
"step": 4460 | |
}, | |
{ | |
"epoch": 1.73, | |
"learning_rate": 1.42225793491362e-05, | |
"loss": 0.2508, | |
"step": 4470 | |
}, | |
{ | |
"epoch": 1.73, | |
"learning_rate": 1.4021695460024107e-05, | |
"loss": 0.2845, | |
"step": 4480 | |
}, | |
{ | |
"epoch": 1.73, | |
"learning_rate": 1.3820811570912012e-05, | |
"loss": 0.2647, | |
"step": 4490 | |
}, | |
{ | |
"epoch": 1.74, | |
"learning_rate": 1.3619927681799922e-05, | |
"loss": 0.2667, | |
"step": 4500 | |
}, | |
{ | |
"epoch": 1.74, | |
"eval_loss": 0.3072335422039032, | |
"eval_runtime": 231.7522, | |
"eval_samples_per_second": 2.157, | |
"eval_steps_per_second": 0.069, | |
"step": 4500 | |
}, | |
{ | |
"epoch": 1.74, | |
"learning_rate": 1.3419043792687827e-05, | |
"loss": 0.2759, | |
"step": 4510 | |
}, | |
{ | |
"epoch": 1.75, | |
"learning_rate": 1.3218159903575733e-05, | |
"loss": 0.2618, | |
"step": 4520 | |
}, | |
{ | |
"epoch": 1.75, | |
"learning_rate": 1.3017276014463642e-05, | |
"loss": 0.2465, | |
"step": 4530 | |
}, | |
{ | |
"epoch": 1.75, | |
"learning_rate": 1.2816392125351548e-05, | |
"loss": 0.2601, | |
"step": 4540 | |
}, | |
{ | |
"epoch": 1.76, | |
"learning_rate": 1.2615508236239454e-05, | |
"loss": 0.2567, | |
"step": 4550 | |
}, | |
{ | |
"epoch": 1.76, | |
"learning_rate": 1.2414624347127362e-05, | |
"loss": 0.2366, | |
"step": 4560 | |
}, | |
{ | |
"epoch": 1.77, | |
"learning_rate": 1.2213740458015267e-05, | |
"loss": 0.2563, | |
"step": 4570 | |
}, | |
{ | |
"epoch": 1.77, | |
"learning_rate": 1.2012856568903175e-05, | |
"loss": 0.2599, | |
"step": 4580 | |
}, | |
{ | |
"epoch": 1.77, | |
"learning_rate": 1.181197267979108e-05, | |
"loss": 0.2677, | |
"step": 4590 | |
}, | |
{ | |
"epoch": 1.78, | |
"learning_rate": 1.1611088790678988e-05, | |
"loss": 0.2559, | |
"step": 4600 | |
}, | |
{ | |
"epoch": 1.78, | |
"eval_loss": 0.3053501546382904, | |
"eval_runtime": 227.8046, | |
"eval_samples_per_second": 2.195, | |
"eval_steps_per_second": 0.07, | |
"step": 4600 | |
}, | |
{ | |
"epoch": 1.78, | |
"learning_rate": 1.1410204901566896e-05, | |
"loss": 0.2499, | |
"step": 4610 | |
}, | |
{ | |
"epoch": 1.78, | |
"learning_rate": 1.1209321012454802e-05, | |
"loss": 0.27, | |
"step": 4620 | |
}, | |
{ | |
"epoch": 1.79, | |
"learning_rate": 1.1008437123342709e-05, | |
"loss": 0.2661, | |
"step": 4630 | |
}, | |
{ | |
"epoch": 1.79, | |
"learning_rate": 1.0807553234230615e-05, | |
"loss": 0.2456, | |
"step": 4640 | |
}, | |
{ | |
"epoch": 1.8, | |
"learning_rate": 1.0606669345118522e-05, | |
"loss": 0.2711, | |
"step": 4650 | |
}, | |
{ | |
"epoch": 1.8, | |
"learning_rate": 1.0405785456006428e-05, | |
"loss": 0.2593, | |
"step": 4660 | |
}, | |
{ | |
"epoch": 1.8, | |
"learning_rate": 1.0204901566894336e-05, | |
"loss": 0.2536, | |
"step": 4670 | |
}, | |
{ | |
"epoch": 1.81, | |
"learning_rate": 1.0004017677782242e-05, | |
"loss": 0.2514, | |
"step": 4680 | |
}, | |
{ | |
"epoch": 1.81, | |
"learning_rate": 9.803133788670149e-06, | |
"loss": 0.2489, | |
"step": 4690 | |
}, | |
{ | |
"epoch": 1.82, | |
"learning_rate": 9.602249899558057e-06, | |
"loss": 0.2238, | |
"step": 4700 | |
}, | |
{ | |
"epoch": 1.82, | |
"eval_loss": 0.3055942952632904, | |
"eval_runtime": 229.6273, | |
"eval_samples_per_second": 2.177, | |
"eval_steps_per_second": 0.07, | |
"step": 4700 | |
}, | |
{ | |
"epoch": 1.82, | |
"learning_rate": 9.401366010445962e-06, | |
"loss": 0.2482, | |
"step": 4710 | |
}, | |
{ | |
"epoch": 1.82, | |
"learning_rate": 9.20048212133387e-06, | |
"loss": 0.2443, | |
"step": 4720 | |
}, | |
{ | |
"epoch": 1.83, | |
"learning_rate": 8.999598232221776e-06, | |
"loss": 0.2386, | |
"step": 4730 | |
}, | |
{ | |
"epoch": 1.83, | |
"learning_rate": 8.798714343109683e-06, | |
"loss": 0.2502, | |
"step": 4740 | |
}, | |
{ | |
"epoch": 1.83, | |
"learning_rate": 8.597830453997589e-06, | |
"loss": 0.2552, | |
"step": 4750 | |
}, | |
{ | |
"epoch": 1.84, | |
"learning_rate": 8.396946564885497e-06, | |
"loss": 0.2473, | |
"step": 4760 | |
}, | |
{ | |
"epoch": 1.84, | |
"learning_rate": 8.196062675773402e-06, | |
"loss": 0.249, | |
"step": 4770 | |
}, | |
{ | |
"epoch": 1.85, | |
"learning_rate": 7.99517878666131e-06, | |
"loss": 0.2373, | |
"step": 4780 | |
}, | |
{ | |
"epoch": 1.85, | |
"learning_rate": 7.794294897549218e-06, | |
"loss": 0.2211, | |
"step": 4790 | |
}, | |
{ | |
"epoch": 1.85, | |
"learning_rate": 7.593411008437123e-06, | |
"loss": 0.2686, | |
"step": 4800 | |
}, | |
{ | |
"epoch": 1.85, | |
"eval_loss": 0.3053850531578064, | |
"eval_runtime": 231.0424, | |
"eval_samples_per_second": 2.164, | |
"eval_steps_per_second": 0.069, | |
"step": 4800 | |
}, | |
{ | |
"epoch": 1.86, | |
"learning_rate": 7.392527119325031e-06, | |
"loss": 0.2641, | |
"step": 4810 | |
}, | |
{ | |
"epoch": 1.86, | |
"learning_rate": 7.191643230212937e-06, | |
"loss": 0.2389, | |
"step": 4820 | |
}, | |
{ | |
"epoch": 1.87, | |
"learning_rate": 6.990759341100844e-06, | |
"loss": 0.2573, | |
"step": 4830 | |
}, | |
{ | |
"epoch": 1.87, | |
"learning_rate": 6.78987545198875e-06, | |
"loss": 0.2335, | |
"step": 4840 | |
}, | |
{ | |
"epoch": 1.87, | |
"learning_rate": 6.5889915628766575e-06, | |
"loss": 0.2724, | |
"step": 4850 | |
}, | |
{ | |
"epoch": 1.88, | |
"learning_rate": 6.388107673764563e-06, | |
"loss": 0.2552, | |
"step": 4860 | |
}, | |
{ | |
"epoch": 1.88, | |
"learning_rate": 6.187223784652471e-06, | |
"loss": 0.2422, | |
"step": 4870 | |
}, | |
{ | |
"epoch": 1.88, | |
"learning_rate": 5.9863398955403775e-06, | |
"loss": 0.252, | |
"step": 4880 | |
}, | |
{ | |
"epoch": 1.89, | |
"learning_rate": 5.785456006428284e-06, | |
"loss": 0.2619, | |
"step": 4890 | |
}, | |
{ | |
"epoch": 1.89, | |
"learning_rate": 5.584572117316191e-06, | |
"loss": 0.2684, | |
"step": 4900 | |
}, | |
{ | |
"epoch": 1.89, | |
"eval_loss": 0.3049665093421936, | |
"eval_runtime": 231.0605, | |
"eval_samples_per_second": 2.164, | |
"eval_steps_per_second": 0.069, | |
"step": 4900 | |
}, | |
{ | |
"epoch": 1.9, | |
"learning_rate": 5.383688228204098e-06, | |
"loss": 0.2503, | |
"step": 4910 | |
}, | |
{ | |
"epoch": 1.9, | |
"learning_rate": 5.182804339092005e-06, | |
"loss": 0.2581, | |
"step": 4920 | |
}, | |
{ | |
"epoch": 1.9, | |
"learning_rate": 4.981920449979912e-06, | |
"loss": 0.2445, | |
"step": 4930 | |
}, | |
{ | |
"epoch": 1.91, | |
"learning_rate": 4.781036560867818e-06, | |
"loss": 0.2527, | |
"step": 4940 | |
}, | |
{ | |
"epoch": 1.91, | |
"learning_rate": 4.580152671755725e-06, | |
"loss": 0.2579, | |
"step": 4950 | |
}, | |
{ | |
"epoch": 1.92, | |
"learning_rate": 4.379268782643632e-06, | |
"loss": 0.2558, | |
"step": 4960 | |
}, | |
{ | |
"epoch": 1.92, | |
"learning_rate": 4.178384893531538e-06, | |
"loss": 0.2598, | |
"step": 4970 | |
}, | |
{ | |
"epoch": 1.92, | |
"learning_rate": 3.977501004419446e-06, | |
"loss": 0.231, | |
"step": 4980 | |
}, | |
{ | |
"epoch": 1.93, | |
"learning_rate": 3.776617115307352e-06, | |
"loss": 0.2665, | |
"step": 4990 | |
}, | |
{ | |
"epoch": 1.93, | |
"learning_rate": 3.5757332261952597e-06, | |
"loss": 0.247, | |
"step": 5000 | |
}, | |
{ | |
"epoch": 1.93, | |
"eval_loss": 0.3040597140789032, | |
"eval_runtime": 233.9026, | |
"eval_samples_per_second": 2.138, | |
"eval_steps_per_second": 0.068, | |
"step": 5000 | |
}, | |
{ | |
"epoch": 1.94, | |
"learning_rate": 3.3748493370831664e-06, | |
"loss": 0.2499, | |
"step": 5010 | |
}, | |
{ | |
"epoch": 1.94, | |
"learning_rate": 3.173965447971073e-06, | |
"loss": 0.241, | |
"step": 5020 | |
}, | |
{ | |
"epoch": 1.94, | |
"learning_rate": 2.9730815588589797e-06, | |
"loss": 0.2455, | |
"step": 5030 | |
}, | |
{ | |
"epoch": 1.95, | |
"learning_rate": 2.7721976697468864e-06, | |
"loss": 0.2592, | |
"step": 5040 | |
}, | |
{ | |
"epoch": 1.95, | |
"learning_rate": 2.571313780634793e-06, | |
"loss": 0.2829, | |
"step": 5050 | |
}, | |
{ | |
"epoch": 1.95, | |
"learning_rate": 2.3704298915227e-06, | |
"loss": 0.251, | |
"step": 5060 | |
}, | |
{ | |
"epoch": 1.96, | |
"learning_rate": 2.169546002410607e-06, | |
"loss": 0.2587, | |
"step": 5070 | |
}, | |
{ | |
"epoch": 1.96, | |
"learning_rate": 1.9686621132985135e-06, | |
"loss": 0.2574, | |
"step": 5080 | |
}, | |
{ | |
"epoch": 1.97, | |
"learning_rate": 1.7677782241864203e-06, | |
"loss": 0.2697, | |
"step": 5090 | |
}, | |
{ | |
"epoch": 1.97, | |
"learning_rate": 1.566894335074327e-06, | |
"loss": 0.2477, | |
"step": 5100 | |
}, | |
{ | |
"epoch": 1.97, | |
"eval_loss": 0.303466796875, | |
"eval_runtime": 231.3482, | |
"eval_samples_per_second": 2.161, | |
"eval_steps_per_second": 0.069, | |
"step": 5100 | |
}, | |
{ | |
"epoch": 1.97, | |
"learning_rate": 1.3660104459622339e-06, | |
"loss": 0.2642, | |
"step": 5110 | |
}, | |
{ | |
"epoch": 1.98, | |
"learning_rate": 1.1651265568501408e-06, | |
"loss": 0.2616, | |
"step": 5120 | |
}, | |
{ | |
"epoch": 1.98, | |
"learning_rate": 9.642426677380474e-07, | |
"loss": 0.2268, | |
"step": 5130 | |
}, | |
{ | |
"epoch": 1.99, | |
"learning_rate": 7.633587786259542e-07, | |
"loss": 0.2561, | |
"step": 5140 | |
}, | |
{ | |
"epoch": 1.99, | |
"learning_rate": 5.62474889513861e-07, | |
"loss": 0.2544, | |
"step": 5150 | |
}, | |
{ | |
"epoch": 1.99, | |
"learning_rate": 3.6159100040176776e-07, | |
"loss": 0.2465, | |
"step": 5160 | |
}, | |
{ | |
"epoch": 2.0, | |
"learning_rate": 1.6070711128967456e-07, | |
"loss": 0.2721, | |
"step": 5170 | |
} | |
], | |
"max_steps": 5178, | |
"num_train_epochs": 2, | |
"total_flos": 3.628920535154426e+18, | |
"trial_name": null, | |
"trial_params": null | |
} | |