colsmolvlm-alpha / checkpoint-2772 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 100,
"global_step": 2772,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010822510822510822,
"grad_norm": 1.453125,
"learning_rate": 5e-05,
"loss": 1.0068,
"step": 10
},
{
"epoch": 0.021645021645021644,
"grad_norm": 0.7421875,
"learning_rate": 0.0001,
"loss": 0.7642,
"step": 20
},
{
"epoch": 0.032467532467532464,
"grad_norm": 1.140625,
"learning_rate": 0.00015,
"loss": 0.676,
"step": 30
},
{
"epoch": 0.04329004329004329,
"grad_norm": 0.2890625,
"learning_rate": 0.0002,
"loss": 0.6209,
"step": 40
},
{
"epoch": 0.05411255411255411,
"grad_norm": 0.4453125,
"learning_rate": 0.00025,
"loss": 0.5199,
"step": 50
},
{
"epoch": 0.06493506493506493,
"grad_norm": 0.353515625,
"learning_rate": 0.0003,
"loss": 0.3979,
"step": 60
},
{
"epoch": 0.07575757575757576,
"grad_norm": 0.80859375,
"learning_rate": 0.00035,
"loss": 0.3112,
"step": 70
},
{
"epoch": 0.08658008658008658,
"grad_norm": 0.56640625,
"learning_rate": 0.0004,
"loss": 0.2598,
"step": 80
},
{
"epoch": 0.09740259740259741,
"grad_norm": 0.25,
"learning_rate": 0.00045000000000000004,
"loss": 0.2816,
"step": 90
},
{
"epoch": 0.10822510822510822,
"grad_norm": 0.359375,
"learning_rate": 0.0005,
"loss": 0.2459,
"step": 100
},
{
"epoch": 0.10822510822510822,
"eval_loss": 0.2404150664806366,
"eval_runtime": 62.0434,
"eval_samples_per_second": 8.059,
"eval_steps_per_second": 0.064,
"step": 100
},
{
"epoch": 0.11904761904761904,
"grad_norm": 0.447265625,
"learning_rate": 0.0004981287425149701,
"loss": 0.2341,
"step": 110
},
{
"epoch": 0.12987012987012986,
"grad_norm": 0.353515625,
"learning_rate": 0.0004962574850299401,
"loss": 0.2285,
"step": 120
},
{
"epoch": 0.1406926406926407,
"grad_norm": 0.314453125,
"learning_rate": 0.0004943862275449102,
"loss": 0.2229,
"step": 130
},
{
"epoch": 0.15151515151515152,
"grad_norm": 0.392578125,
"learning_rate": 0.0004925149700598803,
"loss": 0.2184,
"step": 140
},
{
"epoch": 0.16233766233766234,
"grad_norm": 0.263671875,
"learning_rate": 0.0004906437125748503,
"loss": 0.2225,
"step": 150
},
{
"epoch": 0.17316017316017315,
"grad_norm": 0.474609375,
"learning_rate": 0.0004887724550898204,
"loss": 0.2258,
"step": 160
},
{
"epoch": 0.18398268398268397,
"grad_norm": 0.408203125,
"learning_rate": 0.0004869011976047904,
"loss": 0.2228,
"step": 170
},
{
"epoch": 0.19480519480519481,
"grad_norm": 0.2734375,
"learning_rate": 0.00048502994011976046,
"loss": 0.2226,
"step": 180
},
{
"epoch": 0.20562770562770563,
"grad_norm": 0.236328125,
"learning_rate": 0.00048315868263473056,
"loss": 0.2105,
"step": 190
},
{
"epoch": 0.21645021645021645,
"grad_norm": 0.3125,
"learning_rate": 0.0004812874251497006,
"loss": 0.1944,
"step": 200
},
{
"epoch": 0.21645021645021645,
"eval_loss": 0.21168169379234314,
"eval_runtime": 46.9599,
"eval_samples_per_second": 10.647,
"eval_steps_per_second": 0.085,
"step": 200
},
{
"epoch": 0.22727272727272727,
"grad_norm": 0.29296875,
"learning_rate": 0.00047941616766467065,
"loss": 0.177,
"step": 210
},
{
"epoch": 0.23809523809523808,
"grad_norm": 0.203125,
"learning_rate": 0.00047754491017964074,
"loss": 0.2048,
"step": 220
},
{
"epoch": 0.24891774891774893,
"grad_norm": 0.32421875,
"learning_rate": 0.0004756736526946108,
"loss": 0.185,
"step": 230
},
{
"epoch": 0.2597402597402597,
"grad_norm": 0.279296875,
"learning_rate": 0.00047380239520958083,
"loss": 0.1941,
"step": 240
},
{
"epoch": 0.27056277056277056,
"grad_norm": 0.30078125,
"learning_rate": 0.0004719311377245509,
"loss": 0.1975,
"step": 250
},
{
"epoch": 0.2813852813852814,
"grad_norm": 0.2197265625,
"learning_rate": 0.00047005988023952097,
"loss": 0.1745,
"step": 260
},
{
"epoch": 0.2922077922077922,
"grad_norm": 0.28515625,
"learning_rate": 0.000468188622754491,
"loss": 0.1835,
"step": 270
},
{
"epoch": 0.30303030303030304,
"grad_norm": 0.2177734375,
"learning_rate": 0.0004663173652694611,
"loss": 0.2128,
"step": 280
},
{
"epoch": 0.31385281385281383,
"grad_norm": 0.2490234375,
"learning_rate": 0.00046444610778443115,
"loss": 0.1647,
"step": 290
},
{
"epoch": 0.3246753246753247,
"grad_norm": 0.2412109375,
"learning_rate": 0.0004625748502994012,
"loss": 0.1947,
"step": 300
},
{
"epoch": 0.3246753246753247,
"eval_loss": 0.18798980116844177,
"eval_runtime": 47.046,
"eval_samples_per_second": 10.628,
"eval_steps_per_second": 0.085,
"step": 300
},
{
"epoch": 0.3354978354978355,
"grad_norm": 0.2392578125,
"learning_rate": 0.00046070359281437124,
"loss": 0.2069,
"step": 310
},
{
"epoch": 0.3463203463203463,
"grad_norm": 0.26953125,
"learning_rate": 0.00045883233532934134,
"loss": 0.2029,
"step": 320
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.26171875,
"learning_rate": 0.0004569610778443114,
"loss": 0.2126,
"step": 330
},
{
"epoch": 0.36796536796536794,
"grad_norm": 0.16015625,
"learning_rate": 0.0004550898203592814,
"loss": 0.1838,
"step": 340
},
{
"epoch": 0.3787878787878788,
"grad_norm": 0.271484375,
"learning_rate": 0.0004532185628742515,
"loss": 0.1869,
"step": 350
},
{
"epoch": 0.38961038961038963,
"grad_norm": 0.2333984375,
"learning_rate": 0.00045134730538922156,
"loss": 0.1897,
"step": 360
},
{
"epoch": 0.4004329004329004,
"grad_norm": 0.2236328125,
"learning_rate": 0.0004494760479041916,
"loss": 0.1896,
"step": 370
},
{
"epoch": 0.41125541125541126,
"grad_norm": 0.275390625,
"learning_rate": 0.0004476047904191617,
"loss": 0.1956,
"step": 380
},
{
"epoch": 0.42207792207792205,
"grad_norm": 0.2099609375,
"learning_rate": 0.00044573353293413174,
"loss": 0.1776,
"step": 390
},
{
"epoch": 0.4329004329004329,
"grad_norm": 0.2314453125,
"learning_rate": 0.0004438622754491018,
"loss": 0.1866,
"step": 400
},
{
"epoch": 0.4329004329004329,
"eval_loss": 0.1857587993144989,
"eval_runtime": 49.3784,
"eval_samples_per_second": 10.126,
"eval_steps_per_second": 0.081,
"step": 400
},
{
"epoch": 0.44372294372294374,
"grad_norm": 0.37109375,
"learning_rate": 0.0004419910179640719,
"loss": 0.1785,
"step": 410
},
{
"epoch": 0.45454545454545453,
"grad_norm": 0.26953125,
"learning_rate": 0.00044011976047904193,
"loss": 0.2015,
"step": 420
},
{
"epoch": 0.4653679653679654,
"grad_norm": 0.26171875,
"learning_rate": 0.00043824850299401197,
"loss": 0.1682,
"step": 430
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.2734375,
"learning_rate": 0.00043637724550898207,
"loss": 0.1783,
"step": 440
},
{
"epoch": 0.487012987012987,
"grad_norm": 0.2138671875,
"learning_rate": 0.00043450598802395206,
"loss": 0.188,
"step": 450
},
{
"epoch": 0.49783549783549785,
"grad_norm": 0.1669921875,
"learning_rate": 0.00043263473053892215,
"loss": 0.1804,
"step": 460
},
{
"epoch": 0.5086580086580087,
"grad_norm": 0.26953125,
"learning_rate": 0.00043076347305389225,
"loss": 0.1698,
"step": 470
},
{
"epoch": 0.5194805194805194,
"grad_norm": 0.291015625,
"learning_rate": 0.00042889221556886224,
"loss": 0.1776,
"step": 480
},
{
"epoch": 0.5303030303030303,
"grad_norm": 0.287109375,
"learning_rate": 0.00042702095808383234,
"loss": 0.1753,
"step": 490
},
{
"epoch": 0.5411255411255411,
"grad_norm": 0.244140625,
"learning_rate": 0.00042514970059880243,
"loss": 0.1786,
"step": 500
},
{
"epoch": 0.5411255411255411,
"eval_loss": 0.18613742291927338,
"eval_runtime": 49.4353,
"eval_samples_per_second": 10.114,
"eval_steps_per_second": 0.081,
"step": 500
},
{
"epoch": 0.551948051948052,
"grad_norm": 0.30859375,
"learning_rate": 0.0004232784431137724,
"loss": 0.1558,
"step": 510
},
{
"epoch": 0.5627705627705628,
"grad_norm": 0.1484375,
"learning_rate": 0.0004214071856287425,
"loss": 0.1595,
"step": 520
},
{
"epoch": 0.5735930735930735,
"grad_norm": 0.208984375,
"learning_rate": 0.0004195359281437126,
"loss": 0.161,
"step": 530
},
{
"epoch": 0.5844155844155844,
"grad_norm": 0.21484375,
"learning_rate": 0.00041766467065868266,
"loss": 0.1699,
"step": 540
},
{
"epoch": 0.5952380952380952,
"grad_norm": 0.2890625,
"learning_rate": 0.0004157934131736527,
"loss": 0.1694,
"step": 550
},
{
"epoch": 0.6060606060606061,
"grad_norm": 0.1943359375,
"learning_rate": 0.00041392215568862275,
"loss": 0.1753,
"step": 560
},
{
"epoch": 0.6168831168831169,
"grad_norm": 0.185546875,
"learning_rate": 0.00041205089820359284,
"loss": 0.1781,
"step": 570
},
{
"epoch": 0.6277056277056277,
"grad_norm": 0.283203125,
"learning_rate": 0.0004101796407185629,
"loss": 0.1778,
"step": 580
},
{
"epoch": 0.6385281385281385,
"grad_norm": 0.208984375,
"learning_rate": 0.00040830838323353293,
"loss": 0.1501,
"step": 590
},
{
"epoch": 0.6493506493506493,
"grad_norm": 0.2021484375,
"learning_rate": 0.000406437125748503,
"loss": 0.1855,
"step": 600
},
{
"epoch": 0.6493506493506493,
"eval_loss": 0.18048742413520813,
"eval_runtime": 52.6384,
"eval_samples_per_second": 9.499,
"eval_steps_per_second": 0.076,
"step": 600
},
{
"epoch": 0.6601731601731602,
"grad_norm": 0.162109375,
"learning_rate": 0.00040456586826347307,
"loss": 0.1783,
"step": 610
},
{
"epoch": 0.670995670995671,
"grad_norm": 0.330078125,
"learning_rate": 0.0004026946107784431,
"loss": 0.1604,
"step": 620
},
{
"epoch": 0.6818181818181818,
"grad_norm": 0.205078125,
"learning_rate": 0.0004008233532934132,
"loss": 0.168,
"step": 630
},
{
"epoch": 0.6926406926406926,
"grad_norm": 0.2080078125,
"learning_rate": 0.00039895209580838325,
"loss": 0.1583,
"step": 640
},
{
"epoch": 0.7034632034632035,
"grad_norm": 0.203125,
"learning_rate": 0.0003970808383233533,
"loss": 0.1432,
"step": 650
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.2177734375,
"learning_rate": 0.0003952095808383234,
"loss": 0.1823,
"step": 660
},
{
"epoch": 0.7251082251082251,
"grad_norm": 0.2275390625,
"learning_rate": 0.00039333832335329343,
"loss": 0.1776,
"step": 670
},
{
"epoch": 0.7359307359307359,
"grad_norm": 0.234375,
"learning_rate": 0.0003914670658682635,
"loss": 0.1513,
"step": 680
},
{
"epoch": 0.7467532467532467,
"grad_norm": 0.283203125,
"learning_rate": 0.0003895958083832336,
"loss": 0.1692,
"step": 690
},
{
"epoch": 0.7575757575757576,
"grad_norm": 0.140625,
"learning_rate": 0.00038772455089820356,
"loss": 0.1716,
"step": 700
},
{
"epoch": 0.7575757575757576,
"eval_loss": 0.16686995327472687,
"eval_runtime": 48.5544,
"eval_samples_per_second": 10.298,
"eval_steps_per_second": 0.082,
"step": 700
},
{
"epoch": 0.7683982683982684,
"grad_norm": 0.28125,
"learning_rate": 0.00038585329341317366,
"loss": 0.1619,
"step": 710
},
{
"epoch": 0.7792207792207793,
"grad_norm": 0.2197265625,
"learning_rate": 0.00038398203592814376,
"loss": 0.167,
"step": 720
},
{
"epoch": 0.79004329004329,
"grad_norm": 0.3125,
"learning_rate": 0.00038211077844311375,
"loss": 0.1507,
"step": 730
},
{
"epoch": 0.8008658008658008,
"grad_norm": 0.390625,
"learning_rate": 0.00038023952095808384,
"loss": 0.1649,
"step": 740
},
{
"epoch": 0.8116883116883117,
"grad_norm": 0.185546875,
"learning_rate": 0.00037836826347305394,
"loss": 0.1537,
"step": 750
},
{
"epoch": 0.8225108225108225,
"grad_norm": 0.2021484375,
"learning_rate": 0.00037649700598802393,
"loss": 0.1617,
"step": 760
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.267578125,
"learning_rate": 0.00037462574850299403,
"loss": 0.1641,
"step": 770
},
{
"epoch": 0.8441558441558441,
"grad_norm": 0.296875,
"learning_rate": 0.0003727544910179641,
"loss": 0.1742,
"step": 780
},
{
"epoch": 0.854978354978355,
"grad_norm": 0.2265625,
"learning_rate": 0.0003708832335329341,
"loss": 0.1787,
"step": 790
},
{
"epoch": 0.8658008658008658,
"grad_norm": 0.1845703125,
"learning_rate": 0.0003690119760479042,
"loss": 0.1736,
"step": 800
},
{
"epoch": 0.8658008658008658,
"eval_loss": 0.16875208914279938,
"eval_runtime": 49.7255,
"eval_samples_per_second": 10.055,
"eval_steps_per_second": 0.08,
"step": 800
},
{
"epoch": 0.8766233766233766,
"grad_norm": 0.2109375,
"learning_rate": 0.00036714071856287425,
"loss": 0.1729,
"step": 810
},
{
"epoch": 0.8874458874458875,
"grad_norm": 0.392578125,
"learning_rate": 0.0003652694610778443,
"loss": 0.1617,
"step": 820
},
{
"epoch": 0.8982683982683982,
"grad_norm": 0.2578125,
"learning_rate": 0.0003633982035928144,
"loss": 0.1637,
"step": 830
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.2109375,
"learning_rate": 0.00036152694610778444,
"loss": 0.1569,
"step": 840
},
{
"epoch": 0.9199134199134199,
"grad_norm": 0.18359375,
"learning_rate": 0.0003596556886227545,
"loss": 0.1547,
"step": 850
},
{
"epoch": 0.9307359307359307,
"grad_norm": 0.2197265625,
"learning_rate": 0.0003577844311377246,
"loss": 0.1645,
"step": 860
},
{
"epoch": 0.9415584415584416,
"grad_norm": 0.357421875,
"learning_rate": 0.0003559131736526946,
"loss": 0.163,
"step": 870
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.255859375,
"learning_rate": 0.00035404191616766466,
"loss": 0.1748,
"step": 880
},
{
"epoch": 0.9632034632034632,
"grad_norm": 0.19921875,
"learning_rate": 0.00035217065868263476,
"loss": 0.1672,
"step": 890
},
{
"epoch": 0.974025974025974,
"grad_norm": 0.330078125,
"learning_rate": 0.0003502994011976048,
"loss": 0.1699,
"step": 900
},
{
"epoch": 0.974025974025974,
"eval_loss": 0.16794857382774353,
"eval_runtime": 58.792,
"eval_samples_per_second": 8.505,
"eval_steps_per_second": 0.068,
"step": 900
},
{
"epoch": 0.9848484848484849,
"grad_norm": 0.1435546875,
"learning_rate": 0.00034842814371257485,
"loss": 0.1537,
"step": 910
},
{
"epoch": 0.9956709956709957,
"grad_norm": 0.2421875,
"learning_rate": 0.00034655688622754494,
"loss": 0.1629,
"step": 920
},
{
"epoch": 1.0064935064935066,
"grad_norm": 0.19140625,
"learning_rate": 0.000344685628742515,
"loss": 0.1499,
"step": 930
},
{
"epoch": 1.0173160173160174,
"grad_norm": 0.28125,
"learning_rate": 0.00034281437125748503,
"loss": 0.1464,
"step": 940
},
{
"epoch": 1.0281385281385282,
"grad_norm": 0.173828125,
"learning_rate": 0.00034094311377245507,
"loss": 0.1416,
"step": 950
},
{
"epoch": 1.0389610389610389,
"grad_norm": 0.2119140625,
"learning_rate": 0.00033907185628742517,
"loss": 0.1545,
"step": 960
},
{
"epoch": 1.0497835497835497,
"grad_norm": 0.18359375,
"learning_rate": 0.0003372005988023952,
"loss": 0.1431,
"step": 970
},
{
"epoch": 1.0606060606060606,
"grad_norm": 0.279296875,
"learning_rate": 0.00033532934131736525,
"loss": 0.1607,
"step": 980
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.384765625,
"learning_rate": 0.00033345808383233535,
"loss": 0.1602,
"step": 990
},
{
"epoch": 1.0822510822510822,
"grad_norm": 0.2470703125,
"learning_rate": 0.0003315868263473054,
"loss": 0.1651,
"step": 1000
},
{
"epoch": 1.0822510822510822,
"eval_loss": 0.1569806933403015,
"eval_runtime": 57.763,
"eval_samples_per_second": 8.656,
"eval_steps_per_second": 0.069,
"step": 1000
},
{
"epoch": 1.093073593073593,
"grad_norm": 0.2734375,
"learning_rate": 0.00032971556886227544,
"loss": 0.146,
"step": 1010
},
{
"epoch": 1.103896103896104,
"grad_norm": 0.275390625,
"learning_rate": 0.00032784431137724553,
"loss": 0.1341,
"step": 1020
},
{
"epoch": 1.1147186147186148,
"grad_norm": 0.1640625,
"learning_rate": 0.0003259730538922156,
"loss": 0.1412,
"step": 1030
},
{
"epoch": 1.1255411255411256,
"grad_norm": 0.21875,
"learning_rate": 0.0003241017964071856,
"loss": 0.152,
"step": 1040
},
{
"epoch": 1.1363636363636362,
"grad_norm": 0.1962890625,
"learning_rate": 0.0003222305389221557,
"loss": 0.1501,
"step": 1050
},
{
"epoch": 1.1471861471861473,
"grad_norm": 0.1337890625,
"learning_rate": 0.00032035928143712576,
"loss": 0.1359,
"step": 1060
},
{
"epoch": 1.158008658008658,
"grad_norm": 0.271484375,
"learning_rate": 0.0003184880239520958,
"loss": 0.1437,
"step": 1070
},
{
"epoch": 1.1688311688311688,
"grad_norm": 0.2216796875,
"learning_rate": 0.0003166167664670659,
"loss": 0.1395,
"step": 1080
},
{
"epoch": 1.1796536796536796,
"grad_norm": 0.314453125,
"learning_rate": 0.0003147455089820359,
"loss": 0.1403,
"step": 1090
},
{
"epoch": 1.1904761904761905,
"grad_norm": 0.2236328125,
"learning_rate": 0.000312874251497006,
"loss": 0.157,
"step": 1100
},
{
"epoch": 1.1904761904761905,
"eval_loss": 0.15362851321697235,
"eval_runtime": 50.1243,
"eval_samples_per_second": 9.975,
"eval_steps_per_second": 0.08,
"step": 1100
},
{
"epoch": 1.2012987012987013,
"grad_norm": 0.2177734375,
"learning_rate": 0.0003110029940119761,
"loss": 0.1377,
"step": 1110
},
{
"epoch": 1.2121212121212122,
"grad_norm": 0.2138671875,
"learning_rate": 0.0003091317365269461,
"loss": 0.1423,
"step": 1120
},
{
"epoch": 1.222943722943723,
"grad_norm": 0.1953125,
"learning_rate": 0.00030726047904191617,
"loss": 0.1353,
"step": 1130
},
{
"epoch": 1.2337662337662338,
"grad_norm": 0.255859375,
"learning_rate": 0.00030538922155688627,
"loss": 0.1453,
"step": 1140
},
{
"epoch": 1.2445887445887447,
"grad_norm": 0.3828125,
"learning_rate": 0.00030351796407185626,
"loss": 0.1398,
"step": 1150
},
{
"epoch": 1.2554112554112553,
"grad_norm": 0.189453125,
"learning_rate": 0.00030164670658682635,
"loss": 0.1315,
"step": 1160
},
{
"epoch": 1.2662337662337662,
"grad_norm": 0.26953125,
"learning_rate": 0.00029977544910179645,
"loss": 0.1323,
"step": 1170
},
{
"epoch": 1.277056277056277,
"grad_norm": 0.1513671875,
"learning_rate": 0.00029790419161676644,
"loss": 0.134,
"step": 1180
},
{
"epoch": 1.2878787878787878,
"grad_norm": 0.3046875,
"learning_rate": 0.00029603293413173654,
"loss": 0.1544,
"step": 1190
},
{
"epoch": 1.2987012987012987,
"grad_norm": 0.236328125,
"learning_rate": 0.0002941616766467066,
"loss": 0.141,
"step": 1200
},
{
"epoch": 1.2987012987012987,
"eval_loss": 0.1604161411523819,
"eval_runtime": 51.9642,
"eval_samples_per_second": 9.622,
"eval_steps_per_second": 0.077,
"step": 1200
},
{
"epoch": 1.3095238095238095,
"grad_norm": 0.14453125,
"learning_rate": 0.0002922904191616766,
"loss": 0.1405,
"step": 1210
},
{
"epoch": 1.3203463203463204,
"grad_norm": 0.154296875,
"learning_rate": 0.0002904191616766467,
"loss": 0.1388,
"step": 1220
},
{
"epoch": 1.3311688311688312,
"grad_norm": 0.1572265625,
"learning_rate": 0.00028854790419161676,
"loss": 0.1482,
"step": 1230
},
{
"epoch": 1.341991341991342,
"grad_norm": 0.201171875,
"learning_rate": 0.0002866766467065868,
"loss": 0.1299,
"step": 1240
},
{
"epoch": 1.3528138528138527,
"grad_norm": 0.2265625,
"learning_rate": 0.0002848053892215569,
"loss": 0.137,
"step": 1250
},
{
"epoch": 1.3636363636363638,
"grad_norm": 0.2255859375,
"learning_rate": 0.00028293413173652695,
"loss": 0.1173,
"step": 1260
},
{
"epoch": 1.3744588744588744,
"grad_norm": 0.25,
"learning_rate": 0.000281062874251497,
"loss": 0.1317,
"step": 1270
},
{
"epoch": 1.3852813852813852,
"grad_norm": 0.1455078125,
"learning_rate": 0.0002791916167664671,
"loss": 0.1475,
"step": 1280
},
{
"epoch": 1.396103896103896,
"grad_norm": 0.150390625,
"learning_rate": 0.00027732035928143713,
"loss": 0.1411,
"step": 1290
},
{
"epoch": 1.406926406926407,
"grad_norm": 0.1552734375,
"learning_rate": 0.00027544910179640717,
"loss": 0.1343,
"step": 1300
},
{
"epoch": 1.406926406926407,
"eval_loss": 0.1617971956729889,
"eval_runtime": 53.8477,
"eval_samples_per_second": 9.285,
"eval_steps_per_second": 0.074,
"step": 1300
},
{
"epoch": 1.4177489177489178,
"grad_norm": 0.26171875,
"learning_rate": 0.00027357784431137727,
"loss": 0.1367,
"step": 1310
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.154296875,
"learning_rate": 0.0002717065868263473,
"loss": 0.1359,
"step": 1320
},
{
"epoch": 1.4393939393939394,
"grad_norm": 0.208984375,
"learning_rate": 0.00026983532934131735,
"loss": 0.1257,
"step": 1330
},
{
"epoch": 1.4502164502164503,
"grad_norm": 0.1865234375,
"learning_rate": 0.0002679640718562874,
"loss": 0.1282,
"step": 1340
},
{
"epoch": 1.4610389610389611,
"grad_norm": 0.1767578125,
"learning_rate": 0.0002660928143712575,
"loss": 0.1266,
"step": 1350
},
{
"epoch": 1.4718614718614718,
"grad_norm": 0.1982421875,
"learning_rate": 0.0002642215568862276,
"loss": 0.1204,
"step": 1360
},
{
"epoch": 1.4826839826839826,
"grad_norm": 0.1962890625,
"learning_rate": 0.0002623502994011976,
"loss": 0.151,
"step": 1370
},
{
"epoch": 1.4935064935064934,
"grad_norm": 0.20703125,
"learning_rate": 0.0002604790419161677,
"loss": 0.1336,
"step": 1380
},
{
"epoch": 1.5043290043290043,
"grad_norm": 0.1376953125,
"learning_rate": 0.0002586077844311378,
"loss": 0.1364,
"step": 1390
},
{
"epoch": 1.5151515151515151,
"grad_norm": 0.333984375,
"learning_rate": 0.00025673652694610776,
"loss": 0.1466,
"step": 1400
},
{
"epoch": 1.5151515151515151,
"eval_loss": 0.165997713804245,
"eval_runtime": 49.4239,
"eval_samples_per_second": 10.117,
"eval_steps_per_second": 0.081,
"step": 1400
},
{
"epoch": 1.525974025974026,
"grad_norm": 0.154296875,
"learning_rate": 0.00025486526946107786,
"loss": 0.1306,
"step": 1410
},
{
"epoch": 1.5367965367965368,
"grad_norm": 0.1630859375,
"learning_rate": 0.00025299401197604796,
"loss": 0.1246,
"step": 1420
},
{
"epoch": 1.5476190476190477,
"grad_norm": 0.1904296875,
"learning_rate": 0.00025112275449101795,
"loss": 0.1333,
"step": 1430
},
{
"epoch": 1.5584415584415585,
"grad_norm": 0.201171875,
"learning_rate": 0.00024925149700598804,
"loss": 0.1258,
"step": 1440
},
{
"epoch": 1.5692640692640691,
"grad_norm": 0.244140625,
"learning_rate": 0.0002473802395209581,
"loss": 0.1493,
"step": 1450
},
{
"epoch": 1.5800865800865802,
"grad_norm": 0.1279296875,
"learning_rate": 0.00024550898203592813,
"loss": 0.1346,
"step": 1460
},
{
"epoch": 1.5909090909090908,
"grad_norm": 0.154296875,
"learning_rate": 0.00024363772455089823,
"loss": 0.1228,
"step": 1470
},
{
"epoch": 1.601731601731602,
"grad_norm": 0.22265625,
"learning_rate": 0.00024176646706586827,
"loss": 0.1335,
"step": 1480
},
{
"epoch": 1.6125541125541125,
"grad_norm": 0.2333984375,
"learning_rate": 0.0002398952095808383,
"loss": 0.137,
"step": 1490
},
{
"epoch": 1.6233766233766234,
"grad_norm": 0.197265625,
"learning_rate": 0.00023802395209580838,
"loss": 0.1351,
"step": 1500
},
{
"epoch": 1.6233766233766234,
"eval_loss": 0.1522991955280304,
"eval_runtime": 49.2679,
"eval_samples_per_second": 10.149,
"eval_steps_per_second": 0.081,
"step": 1500
},
{
"epoch": 1.6341991341991342,
"grad_norm": 0.330078125,
"learning_rate": 0.00023615269461077845,
"loss": 0.1361,
"step": 1510
},
{
"epoch": 1.645021645021645,
"grad_norm": 0.140625,
"learning_rate": 0.0002342814371257485,
"loss": 0.1383,
"step": 1520
},
{
"epoch": 1.655844155844156,
"grad_norm": 0.1826171875,
"learning_rate": 0.00023241017964071857,
"loss": 0.135,
"step": 1530
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.2197265625,
"learning_rate": 0.00023053892215568864,
"loss": 0.1298,
"step": 1540
},
{
"epoch": 1.6774891774891776,
"grad_norm": 0.2080078125,
"learning_rate": 0.00022866766467065868,
"loss": 0.1323,
"step": 1550
},
{
"epoch": 1.6883116883116882,
"grad_norm": 0.232421875,
"learning_rate": 0.00022679640718562875,
"loss": 0.1253,
"step": 1560
},
{
"epoch": 1.6991341991341993,
"grad_norm": 0.130859375,
"learning_rate": 0.0002249251497005988,
"loss": 0.1324,
"step": 1570
},
{
"epoch": 1.70995670995671,
"grad_norm": 0.1865234375,
"learning_rate": 0.0002230538922155689,
"loss": 0.1402,
"step": 1580
},
{
"epoch": 1.7207792207792207,
"grad_norm": 0.1875,
"learning_rate": 0.00022118263473053893,
"loss": 0.1307,
"step": 1590
},
{
"epoch": 1.7316017316017316,
"grad_norm": 0.12890625,
"learning_rate": 0.00021931137724550898,
"loss": 0.1276,
"step": 1600
},
{
"epoch": 1.7316017316017316,
"eval_loss": 0.1606496274471283,
"eval_runtime": 51.8902,
"eval_samples_per_second": 9.636,
"eval_steps_per_second": 0.077,
"step": 1600
},
{
"epoch": 1.7424242424242424,
"grad_norm": 0.1455078125,
"learning_rate": 0.00021744011976047905,
"loss": 0.1302,
"step": 1610
},
{
"epoch": 1.7532467532467533,
"grad_norm": 0.1455078125,
"learning_rate": 0.00021556886227544912,
"loss": 0.1231,
"step": 1620
},
{
"epoch": 1.7640692640692641,
"grad_norm": 0.162109375,
"learning_rate": 0.00021369760479041916,
"loss": 0.1362,
"step": 1630
},
{
"epoch": 1.774891774891775,
"grad_norm": 0.16796875,
"learning_rate": 0.00021182634730538923,
"loss": 0.1264,
"step": 1640
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.25390625,
"learning_rate": 0.0002099550898203593,
"loss": 0.142,
"step": 1650
},
{
"epoch": 1.7965367965367967,
"grad_norm": 0.1279296875,
"learning_rate": 0.00020808383233532934,
"loss": 0.1347,
"step": 1660
},
{
"epoch": 1.8073593073593073,
"grad_norm": 0.1708984375,
"learning_rate": 0.0002062125748502994,
"loss": 0.1399,
"step": 1670
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.24609375,
"learning_rate": 0.00020434131736526945,
"loss": 0.1236,
"step": 1680
},
{
"epoch": 1.829004329004329,
"grad_norm": 0.2255859375,
"learning_rate": 0.00020247005988023952,
"loss": 0.113,
"step": 1690
},
{
"epoch": 1.8398268398268398,
"grad_norm": 0.2421875,
"learning_rate": 0.0002005988023952096,
"loss": 0.1239,
"step": 1700
},
{
"epoch": 1.8398268398268398,
"eval_loss": 0.16243509948253632,
"eval_runtime": 54.1441,
"eval_samples_per_second": 9.235,
"eval_steps_per_second": 0.074,
"step": 1700
},
{
"epoch": 1.8506493506493507,
"grad_norm": 0.1455078125,
"learning_rate": 0.00019872754491017964,
"loss": 0.1257,
"step": 1710
},
{
"epoch": 1.8614718614718615,
"grad_norm": 0.1982421875,
"learning_rate": 0.0001968562874251497,
"loss": 0.151,
"step": 1720
},
{
"epoch": 1.8722943722943723,
"grad_norm": 0.30078125,
"learning_rate": 0.00019498502994011978,
"loss": 0.1238,
"step": 1730
},
{
"epoch": 1.883116883116883,
"grad_norm": 0.2734375,
"learning_rate": 0.00019311377245508982,
"loss": 0.1447,
"step": 1740
},
{
"epoch": 1.893939393939394,
"grad_norm": 0.1728515625,
"learning_rate": 0.00019124251497005986,
"loss": 0.14,
"step": 1750
},
{
"epoch": 1.9047619047619047,
"grad_norm": 0.146484375,
"learning_rate": 0.00018937125748502996,
"loss": 0.118,
"step": 1760
},
{
"epoch": 1.9155844155844157,
"grad_norm": 0.275390625,
"learning_rate": 0.0001875,
"loss": 0.126,
"step": 1770
},
{
"epoch": 1.9264069264069263,
"grad_norm": 0.19140625,
"learning_rate": 0.00018562874251497005,
"loss": 0.1399,
"step": 1780
},
{
"epoch": 1.9372294372294372,
"grad_norm": 0.240234375,
"learning_rate": 0.00018375748502994014,
"loss": 0.1291,
"step": 1790
},
{
"epoch": 1.948051948051948,
"grad_norm": 0.205078125,
"learning_rate": 0.0001818862275449102,
"loss": 0.1339,
"step": 1800
},
{
"epoch": 1.948051948051948,
"eval_loss": 0.1591983437538147,
"eval_runtime": 47.2348,
"eval_samples_per_second": 10.585,
"eval_steps_per_second": 0.085,
"step": 1800
},
{
"epoch": 1.9588744588744589,
"grad_norm": 0.22265625,
"learning_rate": 0.00018001497005988023,
"loss": 0.1414,
"step": 1810
},
{
"epoch": 1.9696969696969697,
"grad_norm": 0.2080078125,
"learning_rate": 0.0001781437125748503,
"loss": 0.116,
"step": 1820
},
{
"epoch": 1.9805194805194806,
"grad_norm": 0.265625,
"learning_rate": 0.00017627245508982037,
"loss": 0.1279,
"step": 1830
},
{
"epoch": 1.9913419913419914,
"grad_norm": 0.2236328125,
"learning_rate": 0.0001744011976047904,
"loss": 0.1136,
"step": 1840
},
{
"epoch": 2.002164502164502,
"grad_norm": 0.11669921875,
"learning_rate": 0.00017252994011976048,
"loss": 0.1158,
"step": 1850
},
{
"epoch": 2.012987012987013,
"grad_norm": 0.12353515625,
"learning_rate": 0.00017065868263473055,
"loss": 0.113,
"step": 1860
},
{
"epoch": 2.0238095238095237,
"grad_norm": 0.234375,
"learning_rate": 0.0001687874251497006,
"loss": 0.1127,
"step": 1870
},
{
"epoch": 2.034632034632035,
"grad_norm": 0.1728515625,
"learning_rate": 0.00016691616766467067,
"loss": 0.1289,
"step": 1880
},
{
"epoch": 2.0454545454545454,
"grad_norm": 0.10791015625,
"learning_rate": 0.0001650449101796407,
"loss": 0.1236,
"step": 1890
},
{
"epoch": 2.0562770562770565,
"grad_norm": 0.2080078125,
"learning_rate": 0.00016317365269461078,
"loss": 0.114,
"step": 1900
},
{
"epoch": 2.0562770562770565,
"eval_loss": 0.15889017283916473,
"eval_runtime": 49.4787,
"eval_samples_per_second": 10.105,
"eval_steps_per_second": 0.081,
"step": 1900
},
{
"epoch": 2.067099567099567,
"grad_norm": 0.1875,
"learning_rate": 0.00016130239520958085,
"loss": 0.1202,
"step": 1910
},
{
"epoch": 2.0779220779220777,
"grad_norm": 0.30078125,
"learning_rate": 0.0001594311377245509,
"loss": 0.1236,
"step": 1920
},
{
"epoch": 2.088744588744589,
"grad_norm": 0.1630859375,
"learning_rate": 0.00015755988023952096,
"loss": 0.1151,
"step": 1930
},
{
"epoch": 2.0995670995670994,
"grad_norm": 0.1806640625,
"learning_rate": 0.00015568862275449103,
"loss": 0.116,
"step": 1940
},
{
"epoch": 2.1103896103896105,
"grad_norm": 0.20703125,
"learning_rate": 0.00015381736526946108,
"loss": 0.1376,
"step": 1950
},
{
"epoch": 2.121212121212121,
"grad_norm": 0.1865234375,
"learning_rate": 0.00015194610778443112,
"loss": 0.1189,
"step": 1960
},
{
"epoch": 2.132034632034632,
"grad_norm": 0.1103515625,
"learning_rate": 0.00015007485029940122,
"loss": 0.1122,
"step": 1970
},
{
"epoch": 2.142857142857143,
"grad_norm": 0.1494140625,
"learning_rate": 0.00014820359281437126,
"loss": 0.1159,
"step": 1980
},
{
"epoch": 2.153679653679654,
"grad_norm": 0.1826171875,
"learning_rate": 0.00014633233532934133,
"loss": 0.1139,
"step": 1990
},
{
"epoch": 2.1645021645021645,
"grad_norm": 0.234375,
"learning_rate": 0.00014446107784431137,
"loss": 0.1173,
"step": 2000
},
{
"epoch": 2.1645021645021645,
"eval_loss": 0.16071972250938416,
"eval_runtime": 53.7032,
"eval_samples_per_second": 9.31,
"eval_steps_per_second": 0.074,
"step": 2000
},
{
"epoch": 2.175324675324675,
"grad_norm": 0.10498046875,
"learning_rate": 0.00014258982035928144,
"loss": 0.0994,
"step": 2010
},
{
"epoch": 2.186147186147186,
"grad_norm": 0.1630859375,
"learning_rate": 0.0001407185628742515,
"loss": 0.1135,
"step": 2020
},
{
"epoch": 2.196969696969697,
"grad_norm": 0.1884765625,
"learning_rate": 0.00013884730538922155,
"loss": 0.1186,
"step": 2030
},
{
"epoch": 2.207792207792208,
"grad_norm": 0.1904296875,
"learning_rate": 0.00013697604790419162,
"loss": 0.1066,
"step": 2040
},
{
"epoch": 2.2186147186147185,
"grad_norm": 0.1923828125,
"learning_rate": 0.0001351047904191617,
"loss": 0.0979,
"step": 2050
},
{
"epoch": 2.2294372294372296,
"grad_norm": 0.10546875,
"learning_rate": 0.00013323353293413174,
"loss": 0.1214,
"step": 2060
},
{
"epoch": 2.24025974025974,
"grad_norm": 0.1259765625,
"learning_rate": 0.00013136227544910178,
"loss": 0.1086,
"step": 2070
},
{
"epoch": 2.2510822510822512,
"grad_norm": 0.2060546875,
"learning_rate": 0.00012949101796407188,
"loss": 0.0932,
"step": 2080
},
{
"epoch": 2.261904761904762,
"grad_norm": 0.1318359375,
"learning_rate": 0.00012761976047904192,
"loss": 0.1053,
"step": 2090
},
{
"epoch": 2.2727272727272725,
"grad_norm": 0.16015625,
"learning_rate": 0.00012574850299401196,
"loss": 0.1231,
"step": 2100
},
{
"epoch": 2.2727272727272725,
"eval_loss": 0.1609112173318863,
"eval_runtime": 50.2755,
"eval_samples_per_second": 9.945,
"eval_steps_per_second": 0.08,
"step": 2100
},
{
"epoch": 2.2835497835497836,
"grad_norm": 0.1357421875,
"learning_rate": 0.00012387724550898203,
"loss": 0.1207,
"step": 2110
},
{
"epoch": 2.2943722943722946,
"grad_norm": 0.16015625,
"learning_rate": 0.0001220059880239521,
"loss": 0.1202,
"step": 2120
},
{
"epoch": 2.3051948051948052,
"grad_norm": 0.1943359375,
"learning_rate": 0.00012013473053892216,
"loss": 0.1215,
"step": 2130
},
{
"epoch": 2.316017316017316,
"grad_norm": 0.2314453125,
"learning_rate": 0.00011826347305389222,
"loss": 0.112,
"step": 2140
},
{
"epoch": 2.326839826839827,
"grad_norm": 0.1767578125,
"learning_rate": 0.00011639221556886227,
"loss": 0.1233,
"step": 2150
},
{
"epoch": 2.3376623376623376,
"grad_norm": 0.16015625,
"learning_rate": 0.00011452095808383234,
"loss": 0.1086,
"step": 2160
},
{
"epoch": 2.3484848484848486,
"grad_norm": 0.138671875,
"learning_rate": 0.0001126497005988024,
"loss": 0.1123,
"step": 2170
},
{
"epoch": 2.3593073593073592,
"grad_norm": 0.193359375,
"learning_rate": 0.00011077844311377246,
"loss": 0.1145,
"step": 2180
},
{
"epoch": 2.3701298701298703,
"grad_norm": 0.2216796875,
"learning_rate": 0.00010890718562874253,
"loss": 0.1117,
"step": 2190
},
{
"epoch": 2.380952380952381,
"grad_norm": 0.2890625,
"learning_rate": 0.00010703592814371257,
"loss": 0.12,
"step": 2200
},
{
"epoch": 2.380952380952381,
"eval_loss": 0.16090266406536102,
"eval_runtime": 59.1415,
"eval_samples_per_second": 8.454,
"eval_steps_per_second": 0.068,
"step": 2200
},
{
"epoch": 2.391774891774892,
"grad_norm": 0.220703125,
"learning_rate": 0.00010516467065868264,
"loss": 0.11,
"step": 2210
},
{
"epoch": 2.4025974025974026,
"grad_norm": 0.1728515625,
"learning_rate": 0.0001032934131736527,
"loss": 0.1198,
"step": 2220
},
{
"epoch": 2.4134199134199132,
"grad_norm": 0.23828125,
"learning_rate": 0.00010142215568862275,
"loss": 0.1179,
"step": 2230
},
{
"epoch": 2.4242424242424243,
"grad_norm": 0.197265625,
"learning_rate": 9.955089820359281e-05,
"loss": 0.1146,
"step": 2240
},
{
"epoch": 2.435064935064935,
"grad_norm": 0.1953125,
"learning_rate": 9.767964071856288e-05,
"loss": 0.1067,
"step": 2250
},
{
"epoch": 2.445887445887446,
"grad_norm": 0.158203125,
"learning_rate": 9.580838323353294e-05,
"loss": 0.105,
"step": 2260
},
{
"epoch": 2.4567099567099566,
"grad_norm": 0.189453125,
"learning_rate": 9.393712574850299e-05,
"loss": 0.1222,
"step": 2270
},
{
"epoch": 2.4675324675324677,
"grad_norm": 0.1376953125,
"learning_rate": 9.206586826347306e-05,
"loss": 0.1068,
"step": 2280
},
{
"epoch": 2.4783549783549783,
"grad_norm": 0.1640625,
"learning_rate": 9.01946107784431e-05,
"loss": 0.1069,
"step": 2290
},
{
"epoch": 2.4891774891774894,
"grad_norm": 0.1728515625,
"learning_rate": 8.832335329341318e-05,
"loss": 0.1276,
"step": 2300
},
{
"epoch": 2.4891774891774894,
"eval_loss": 0.15976013243198395,
"eval_runtime": 51.0014,
"eval_samples_per_second": 9.804,
"eval_steps_per_second": 0.078,
"step": 2300
},
{
"epoch": 2.5,
"grad_norm": 0.2431640625,
"learning_rate": 8.645209580838323e-05,
"loss": 0.1248,
"step": 2310
},
{
"epoch": 2.5108225108225106,
"grad_norm": 0.146484375,
"learning_rate": 8.45808383233533e-05,
"loss": 0.1062,
"step": 2320
},
{
"epoch": 2.5216450216450217,
"grad_norm": 0.1279296875,
"learning_rate": 8.270958083832336e-05,
"loss": 0.0997,
"step": 2330
},
{
"epoch": 2.5324675324675323,
"grad_norm": 0.1962890625,
"learning_rate": 8.083832335329341e-05,
"loss": 0.1171,
"step": 2340
},
{
"epoch": 2.5432900432900434,
"grad_norm": 0.19921875,
"learning_rate": 7.896706586826349e-05,
"loss": 0.0944,
"step": 2350
},
{
"epoch": 2.554112554112554,
"grad_norm": 0.22265625,
"learning_rate": 7.709580838323353e-05,
"loss": 0.1091,
"step": 2360
},
{
"epoch": 2.564935064935065,
"grad_norm": 0.2099609375,
"learning_rate": 7.52245508982036e-05,
"loss": 0.1151,
"step": 2370
},
{
"epoch": 2.5757575757575757,
"grad_norm": 0.1142578125,
"learning_rate": 7.335329341317365e-05,
"loss": 0.1174,
"step": 2380
},
{
"epoch": 2.5865800865800868,
"grad_norm": 0.1494140625,
"learning_rate": 7.148203592814371e-05,
"loss": 0.1219,
"step": 2390
},
{
"epoch": 2.5974025974025974,
"grad_norm": 0.244140625,
"learning_rate": 6.961077844311377e-05,
"loss": 0.1251,
"step": 2400
},
{
"epoch": 2.5974025974025974,
"eval_loss": 0.1569649577140808,
"eval_runtime": 50.4125,
"eval_samples_per_second": 9.918,
"eval_steps_per_second": 0.079,
"step": 2400
},
{
"epoch": 2.608225108225108,
"grad_norm": 0.34375,
"learning_rate": 6.773952095808384e-05,
"loss": 0.1171,
"step": 2410
},
{
"epoch": 2.619047619047619,
"grad_norm": 0.1630859375,
"learning_rate": 6.58682634730539e-05,
"loss": 0.1114,
"step": 2420
},
{
"epoch": 2.62987012987013,
"grad_norm": 0.208984375,
"learning_rate": 6.399700598802395e-05,
"loss": 0.1046,
"step": 2430
},
{
"epoch": 2.6406926406926408,
"grad_norm": 0.08154296875,
"learning_rate": 6.212574850299401e-05,
"loss": 0.1081,
"step": 2440
},
{
"epoch": 2.6515151515151514,
"grad_norm": 0.142578125,
"learning_rate": 6.025449101796408e-05,
"loss": 0.1042,
"step": 2450
},
{
"epoch": 2.6623376623376624,
"grad_norm": 0.1474609375,
"learning_rate": 5.8383233532934134e-05,
"loss": 0.0913,
"step": 2460
},
{
"epoch": 2.673160173160173,
"grad_norm": 0.330078125,
"learning_rate": 5.65119760479042e-05,
"loss": 0.1082,
"step": 2470
},
{
"epoch": 2.683982683982684,
"grad_norm": 0.173828125,
"learning_rate": 5.4640718562874254e-05,
"loss": 0.1158,
"step": 2480
},
{
"epoch": 2.6948051948051948,
"grad_norm": 0.1689453125,
"learning_rate": 5.276946107784431e-05,
"loss": 0.1135,
"step": 2490
},
{
"epoch": 2.7056277056277054,
"grad_norm": 0.2099609375,
"learning_rate": 5.089820359281437e-05,
"loss": 0.1332,
"step": 2500
},
{
"epoch": 2.7056277056277054,
"eval_loss": 0.15841823816299438,
"eval_runtime": 48.0255,
"eval_samples_per_second": 10.411,
"eval_steps_per_second": 0.083,
"step": 2500
},
{
"epoch": 2.7164502164502164,
"grad_norm": 0.140625,
"learning_rate": 4.902694610778443e-05,
"loss": 0.1076,
"step": 2510
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.11572265625,
"learning_rate": 4.7155688622754486e-05,
"loss": 0.1089,
"step": 2520
},
{
"epoch": 2.738095238095238,
"grad_norm": 0.1484375,
"learning_rate": 4.5284431137724556e-05,
"loss": 0.1061,
"step": 2530
},
{
"epoch": 2.7489177489177488,
"grad_norm": 0.123046875,
"learning_rate": 4.341317365269461e-05,
"loss": 0.1067,
"step": 2540
},
{
"epoch": 2.75974025974026,
"grad_norm": 0.201171875,
"learning_rate": 4.154191616766467e-05,
"loss": 0.1113,
"step": 2550
},
{
"epoch": 2.7705627705627704,
"grad_norm": 0.1494140625,
"learning_rate": 3.967065868263473e-05,
"loss": 0.1056,
"step": 2560
},
{
"epoch": 2.7813852813852815,
"grad_norm": 0.181640625,
"learning_rate": 3.779940119760479e-05,
"loss": 0.1135,
"step": 2570
},
{
"epoch": 2.792207792207792,
"grad_norm": 0.15625,
"learning_rate": 3.592814371257485e-05,
"loss": 0.1116,
"step": 2580
},
{
"epoch": 2.8030303030303028,
"grad_norm": 0.2255859375,
"learning_rate": 3.405688622754491e-05,
"loss": 0.1169,
"step": 2590
},
{
"epoch": 2.813852813852814,
"grad_norm": 0.08154296875,
"learning_rate": 3.2185628742514966e-05,
"loss": 0.1023,
"step": 2600
},
{
"epoch": 2.813852813852814,
"eval_loss": 0.15775570273399353,
"eval_runtime": 49.2082,
"eval_samples_per_second": 10.161,
"eval_steps_per_second": 0.081,
"step": 2600
},
{
"epoch": 2.824675324675325,
"grad_norm": 0.103515625,
"learning_rate": 3.031437125748503e-05,
"loss": 0.1076,
"step": 2610
},
{
"epoch": 2.8354978354978355,
"grad_norm": 0.1611328125,
"learning_rate": 2.844311377245509e-05,
"loss": 0.1082,
"step": 2620
},
{
"epoch": 2.846320346320346,
"grad_norm": 0.28515625,
"learning_rate": 2.6571856287425152e-05,
"loss": 0.1125,
"step": 2630
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.07568359375,
"learning_rate": 2.4700598802395212e-05,
"loss": 0.0981,
"step": 2640
},
{
"epoch": 2.867965367965368,
"grad_norm": 0.169921875,
"learning_rate": 2.282934131736527e-05,
"loss": 0.1197,
"step": 2650
},
{
"epoch": 2.878787878787879,
"grad_norm": 0.34765625,
"learning_rate": 2.095808383233533e-05,
"loss": 0.1214,
"step": 2660
},
{
"epoch": 2.8896103896103895,
"grad_norm": 0.1513671875,
"learning_rate": 1.9086826347305392e-05,
"loss": 0.105,
"step": 2670
},
{
"epoch": 2.9004329004329006,
"grad_norm": 0.1767578125,
"learning_rate": 1.7215568862275448e-05,
"loss": 0.1101,
"step": 2680
},
{
"epoch": 2.911255411255411,
"grad_norm": 0.169921875,
"learning_rate": 1.5344311377245508e-05,
"loss": 0.1283,
"step": 2690
},
{
"epoch": 2.9220779220779223,
"grad_norm": 0.130859375,
"learning_rate": 1.347305389221557e-05,
"loss": 0.1126,
"step": 2700
},
{
"epoch": 2.9220779220779223,
"eval_loss": 0.1572347730398178,
"eval_runtime": 58.0947,
"eval_samples_per_second": 8.607,
"eval_steps_per_second": 0.069,
"step": 2700
},
{
"epoch": 2.932900432900433,
"grad_norm": 0.197265625,
"learning_rate": 1.1601796407185628e-05,
"loss": 0.1127,
"step": 2710
},
{
"epoch": 2.9437229437229435,
"grad_norm": 0.259765625,
"learning_rate": 9.73053892215569e-06,
"loss": 0.1236,
"step": 2720
},
{
"epoch": 2.9545454545454546,
"grad_norm": 0.1416015625,
"learning_rate": 7.859281437125748e-06,
"loss": 0.1137,
"step": 2730
},
{
"epoch": 2.965367965367965,
"grad_norm": 0.2041015625,
"learning_rate": 5.9880239520958085e-06,
"loss": 0.1213,
"step": 2740
},
{
"epoch": 2.9761904761904763,
"grad_norm": 0.1728515625,
"learning_rate": 4.116766467065868e-06,
"loss": 0.1083,
"step": 2750
},
{
"epoch": 2.987012987012987,
"grad_norm": 0.2099609375,
"learning_rate": 2.2455089820359283e-06,
"loss": 0.1217,
"step": 2760
},
{
"epoch": 2.997835497835498,
"grad_norm": 0.2099609375,
"learning_rate": 3.7425149700598803e-07,
"loss": 0.1188,
"step": 2770
}
],
"logging_steps": 10,
"max_steps": 2772,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.887405812854188e+18,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}
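
The file above follows the standard log_history layout written by the Hugging Face Trainer: training entries carry "loss", "learning_rate", and "grad_norm" every logging_steps (10) steps, while evaluation entries carry "eval_loss" and throughput fields every eval_steps (100) steps. As a minimal sketch for inspecting this state (assuming only the standard-library json module and the field names visible above; the file path is illustrative), the snippet below separates the two kinds of entries and reports the final training loss and the best evaluation loss:

import json

# Load the checkpoint's trainer state (illustrative path).
with open("checkpoint-2772/trainer_state.json") as f:
    state = json.load(f)

# Training log entries contain "loss"; evaluation entries contain "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"final train loss: {train_logs[-1]['loss']:.4f} at step {train_logs[-1]['step']}")
print(f"best eval loss:   {best['eval_loss']:.4f} at step {best['step']}")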