ryan04152024_ALLDATA / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0200438459129346,
"eval_steps": 100,
"global_step": 3257,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.6797587275505066,
"learning_rate": 4.961621123733498e-05,
"loss": 0.5293,
"step": 25
},
{
"epoch": 0.02,
"grad_norm": 0.559562087059021,
"learning_rate": 4.9232422474669945e-05,
"loss": 0.3908,
"step": 50
},
{
"epoch": 0.02,
"grad_norm": 0.6713371872901917,
"learning_rate": 4.884863371200491e-05,
"loss": 0.4101,
"step": 75
},
{
"epoch": 0.03,
"grad_norm": 2.220188617706299,
"learning_rate": 4.846484494933989e-05,
"loss": 0.3246,
"step": 100
},
{
"epoch": 0.04,
"grad_norm": 0.7794200778007507,
"learning_rate": 4.8081056186674856e-05,
"loss": 0.3521,
"step": 125
},
{
"epoch": 0.05,
"grad_norm": 2.3898696899414062,
"learning_rate": 4.769726742400983e-05,
"loss": 0.338,
"step": 150
},
{
"epoch": 0.05,
"grad_norm": 1.896946668624878,
"learning_rate": 4.731347866134479e-05,
"loss": 0.3142,
"step": 175
},
{
"epoch": 0.06,
"grad_norm": 0.35194501280784607,
"learning_rate": 4.692968989867977e-05,
"loss": 0.3061,
"step": 200
},
{
"epoch": 0.07,
"grad_norm": 0.6125898957252502,
"learning_rate": 4.654590113601474e-05,
"loss": 0.3291,
"step": 225
},
{
"epoch": 0.08,
"grad_norm": 0.36999431252479553,
"learning_rate": 4.616211237334971e-05,
"loss": 0.3392,
"step": 250
},
{
"epoch": 0.09,
"grad_norm": 0.5263044834136963,
"learning_rate": 4.5793675161191285e-05,
"loss": 0.3102,
"step": 275
},
{
"epoch": 0.09,
"grad_norm": 0.3138100802898407,
"learning_rate": 4.542523794903286e-05,
"loss": 0.3065,
"step": 300
},
{
"epoch": 0.1,
"grad_norm": 0.38833898305892944,
"learning_rate": 4.504144918636782e-05,
"loss": 0.3586,
"step": 325
},
{
"epoch": 0.11,
"grad_norm": 2.349430799484253,
"learning_rate": 4.4657660423702795e-05,
"loss": 0.3072,
"step": 350
},
{
"epoch": 0.12,
"grad_norm": 0.3345746397972107,
"learning_rate": 4.427387166103777e-05,
"loss": 0.2855,
"step": 375
},
{
"epoch": 0.13,
"grad_norm": 0.46446025371551514,
"learning_rate": 4.390543444887934e-05,
"loss": 0.2852,
"step": 400
},
{
"epoch": 0.13,
"grad_norm": 0.6488687992095947,
"learning_rate": 4.3521645686214306e-05,
"loss": 0.2731,
"step": 425
},
{
"epoch": 0.14,
"grad_norm": 0.7198542356491089,
"learning_rate": 4.313785692354928e-05,
"loss": 0.2986,
"step": 450
},
{
"epoch": 0.15,
"grad_norm": 1.6638888120651245,
"learning_rate": 4.2754068160884256e-05,
"loss": 0.2985,
"step": 475
},
{
"epoch": 0.16,
"grad_norm": 1.4277877807617188,
"learning_rate": 4.237027939821922e-05,
"loss": 0.2559,
"step": 500
},
{
"epoch": 0.16,
"grad_norm": 3.699174642562866,
"learning_rate": 4.198649063555419e-05,
"loss": 0.2731,
"step": 525
},
{
"epoch": 0.17,
"grad_norm": 1.9921191930770874,
"learning_rate": 4.160270187288917e-05,
"loss": 0.242,
"step": 550
},
{
"epoch": 0.18,
"grad_norm": 0.3546009361743927,
"learning_rate": 4.1218913110224135e-05,
"loss": 0.253,
"step": 575
},
{
"epoch": 0.19,
"grad_norm": 2.4208102226257324,
"learning_rate": 4.08351243475591e-05,
"loss": 0.2734,
"step": 600
},
{
"epoch": 0.2,
"grad_norm": 1.4212377071380615,
"learning_rate": 4.045133558489408e-05,
"loss": 0.2426,
"step": 625
},
{
"epoch": 0.2,
"grad_norm": 3.6487390995025635,
"learning_rate": 4.0067546822229046e-05,
"loss": 0.2798,
"step": 650
},
{
"epoch": 0.21,
"grad_norm": 2.001326322555542,
"learning_rate": 3.968375805956402e-05,
"loss": 0.2599,
"step": 675
},
{
"epoch": 0.22,
"grad_norm": 1.6598902940750122,
"learning_rate": 3.929996929689899e-05,
"loss": 0.2578,
"step": 700
},
{
"epoch": 0.23,
"grad_norm": 1.3611884117126465,
"learning_rate": 3.8916180534233957e-05,
"loss": 0.2766,
"step": 725
},
{
"epoch": 0.23,
"grad_norm": 0.8574317097663879,
"learning_rate": 3.853239177156893e-05,
"loss": 0.2417,
"step": 750
},
{
"epoch": 0.24,
"grad_norm": 0.8539307713508606,
"learning_rate": 3.81486030089039e-05,
"loss": 0.223,
"step": 775
},
{
"epoch": 0.25,
"grad_norm": 1.1913343667984009,
"learning_rate": 3.7764814246238874e-05,
"loss": 0.2602,
"step": 800
},
{
"epoch": 0.26,
"grad_norm": 0.5884910821914673,
"learning_rate": 3.738102548357384e-05,
"loss": 0.2747,
"step": 825
},
{
"epoch": 0.27,
"grad_norm": 0.9413720965385437,
"learning_rate": 3.699723672090881e-05,
"loss": 0.1835,
"step": 850
},
{
"epoch": 0.27,
"grad_norm": 2.626718521118164,
"learning_rate": 3.6613447958243785e-05,
"loss": 0.2883,
"step": 875
},
{
"epoch": 0.28,
"grad_norm": 1.573789119720459,
"learning_rate": 3.622965919557876e-05,
"loss": 0.252,
"step": 900
},
{
"epoch": 0.29,
"grad_norm": 0.6372594833374023,
"learning_rate": 3.584587043291373e-05,
"loss": 0.2624,
"step": 925
},
{
"epoch": 0.3,
"grad_norm": 0.4254266619682312,
"learning_rate": 3.5462081670248696e-05,
"loss": 0.2012,
"step": 950
},
{
"epoch": 0.31,
"grad_norm": 1.2674840688705444,
"learning_rate": 3.5078292907583664e-05,
"loss": 0.2576,
"step": 975
},
{
"epoch": 0.31,
"grad_norm": 0.6976171731948853,
"learning_rate": 3.469450414491864e-05,
"loss": 0.2374,
"step": 1000
},
{
"epoch": 0.32,
"grad_norm": 2.898336410522461,
"learning_rate": 3.4310715382253614e-05,
"loss": 0.2168,
"step": 1025
},
{
"epoch": 0.33,
"grad_norm": 2.6241133213043213,
"learning_rate": 3.3926926619588575e-05,
"loss": 0.2721,
"step": 1050
},
{
"epoch": 0.34,
"grad_norm": 0.452421635389328,
"learning_rate": 3.354313785692355e-05,
"loss": 0.1887,
"step": 1075
},
{
"epoch": 0.34,
"grad_norm": 4.101943492889404,
"learning_rate": 3.3159349094258525e-05,
"loss": 0.2597,
"step": 1100
},
{
"epoch": 0.35,
"grad_norm": 0.8068587779998779,
"learning_rate": 3.277556033159349e-05,
"loss": 0.1803,
"step": 1125
},
{
"epoch": 0.36,
"grad_norm": 3.24800705909729,
"learning_rate": 3.239177156892847e-05,
"loss": 0.2175,
"step": 1150
},
{
"epoch": 0.37,
"grad_norm": 3.0881152153015137,
"learning_rate": 3.2007982806263435e-05,
"loss": 0.1883,
"step": 1175
},
{
"epoch": 0.38,
"grad_norm": 0.5309311151504517,
"learning_rate": 3.1624194043598403e-05,
"loss": 0.243,
"step": 1200
},
{
"epoch": 0.38,
"grad_norm": 0.8343615531921387,
"learning_rate": 3.124040528093338e-05,
"loss": 0.198,
"step": 1225
},
{
"epoch": 0.39,
"grad_norm": 1.199294924736023,
"learning_rate": 3.0856616518268346e-05,
"loss": 0.2609,
"step": 1250
},
{
"epoch": 0.4,
"grad_norm": 1.7239773273468018,
"learning_rate": 3.0472827755603318e-05,
"loss": 0.2184,
"step": 1275
},
{
"epoch": 0.41,
"grad_norm": 0.6922329664230347,
"learning_rate": 3.008903899293829e-05,
"loss": 0.2026,
"step": 1300
},
{
"epoch": 0.41,
"grad_norm": 1.3839455842971802,
"learning_rate": 2.9705250230273257e-05,
"loss": 0.2365,
"step": 1325
},
{
"epoch": 0.42,
"grad_norm": 0.3912523686885834,
"learning_rate": 2.932146146760823e-05,
"loss": 0.2228,
"step": 1350
},
{
"epoch": 0.43,
"grad_norm": 0.7774330973625183,
"learning_rate": 2.8937672704943203e-05,
"loss": 0.1836,
"step": 1375
},
{
"epoch": 0.44,
"grad_norm": 0.7025958895683289,
"learning_rate": 2.855388394227817e-05,
"loss": 0.2092,
"step": 1400
},
{
"epoch": 0.45,
"grad_norm": 3.789003849029541,
"learning_rate": 2.8170095179613143e-05,
"loss": 0.217,
"step": 1425
},
{
"epoch": 0.45,
"grad_norm": 0.5001412034034729,
"learning_rate": 2.7786306416948114e-05,
"loss": 0.1771,
"step": 1450
},
{
"epoch": 0.46,
"grad_norm": 0.585847795009613,
"learning_rate": 2.7402517654283082e-05,
"loss": 0.1899,
"step": 1475
},
{
"epoch": 0.47,
"grad_norm": 1.6145225763320923,
"learning_rate": 2.7018728891618057e-05,
"loss": 0.214,
"step": 1500
},
{
"epoch": 0.48,
"grad_norm": 2.724287748336792,
"learning_rate": 2.6634940128953022e-05,
"loss": 0.1857,
"step": 1525
},
{
"epoch": 0.49,
"grad_norm": 1.1571438312530518,
"learning_rate": 2.6251151366287997e-05,
"loss": 0.2046,
"step": 1550
},
{
"epoch": 0.49,
"grad_norm": 3.0686943531036377,
"learning_rate": 2.5867362603622968e-05,
"loss": 0.1841,
"step": 1575
},
{
"epoch": 0.5,
"grad_norm": 1.6015386581420898,
"learning_rate": 2.5483573840957936e-05,
"loss": 0.2095,
"step": 1600
},
{
"epoch": 0.51,
"grad_norm": 1.5645371675491333,
"learning_rate": 2.5099785078292907e-05,
"loss": 0.193,
"step": 1625
},
{
"epoch": 0.52,
"grad_norm": 1.0582934617996216,
"learning_rate": 2.471599631562788e-05,
"loss": 0.2065,
"step": 1650
},
{
"epoch": 0.52,
"grad_norm": 4.154886722564697,
"learning_rate": 2.433220755296285e-05,
"loss": 0.1955,
"step": 1675
},
{
"epoch": 0.53,
"grad_norm": 2.047689437866211,
"learning_rate": 2.3948418790297822e-05,
"loss": 0.1999,
"step": 1700
},
{
"epoch": 0.54,
"grad_norm": 1.7541192770004272,
"learning_rate": 2.3564630027632793e-05,
"loss": 0.1906,
"step": 1725
},
{
"epoch": 0.55,
"grad_norm": 0.8929910063743591,
"learning_rate": 2.318084126496776e-05,
"loss": 0.2156,
"step": 1750
},
{
"epoch": 0.56,
"grad_norm": 2.3634512424468994,
"learning_rate": 2.2797052502302736e-05,
"loss": 0.251,
"step": 1775
},
{
"epoch": 0.56,
"grad_norm": 1.0741249322891235,
"learning_rate": 2.2413263739637704e-05,
"loss": 0.2221,
"step": 1800
},
{
"epoch": 0.57,
"grad_norm": 0.6083930134773254,
"learning_rate": 2.2029474976972675e-05,
"loss": 0.1922,
"step": 1825
},
{
"epoch": 0.58,
"grad_norm": 3.046408176422119,
"learning_rate": 2.1645686214307644e-05,
"loss": 0.1634,
"step": 1850
},
{
"epoch": 0.59,
"grad_norm": 1.0440948009490967,
"learning_rate": 2.126189745164262e-05,
"loss": 0.1798,
"step": 1875
},
{
"epoch": 0.6,
"grad_norm": 2.302553176879883,
"learning_rate": 2.0878108688977586e-05,
"loss": 0.179,
"step": 1900
},
{
"epoch": 0.6,
"grad_norm": 0.5894582867622375,
"learning_rate": 2.0494319926312558e-05,
"loss": 0.2181,
"step": 1925
},
{
"epoch": 0.61,
"grad_norm": 1.7287715673446655,
"learning_rate": 2.011053116364753e-05,
"loss": 0.1996,
"step": 1950
},
{
"epoch": 0.62,
"grad_norm": 2.5236544609069824,
"learning_rate": 1.97267424009825e-05,
"loss": 0.2366,
"step": 1975
},
{
"epoch": 0.63,
"grad_norm": 1.8819808959960938,
"learning_rate": 1.9342953638317472e-05,
"loss": 0.1906,
"step": 2000
},
{
"epoch": 0.63,
"grad_norm": 0.5952481031417847,
"learning_rate": 1.895916487565244e-05,
"loss": 0.1749,
"step": 2025
},
{
"epoch": 0.64,
"grad_norm": 0.9825776815414429,
"learning_rate": 1.8575376112987415e-05,
"loss": 0.2179,
"step": 2050
},
{
"epoch": 0.65,
"grad_norm": 1.5762274265289307,
"learning_rate": 1.8191587350322383e-05,
"loss": 0.169,
"step": 2075
},
{
"epoch": 0.66,
"grad_norm": 1.433192253112793,
"learning_rate": 1.7807798587657354e-05,
"loss": 0.1677,
"step": 2100
},
{
"epoch": 0.67,
"grad_norm": 2.5697052478790283,
"learning_rate": 1.7424009824992322e-05,
"loss": 0.2337,
"step": 2125
},
{
"epoch": 0.67,
"grad_norm": 1.336775541305542,
"learning_rate": 1.7040221062327297e-05,
"loss": 0.2168,
"step": 2150
},
{
"epoch": 0.68,
"grad_norm": 2.746216058731079,
"learning_rate": 1.6656432299662265e-05,
"loss": 0.1887,
"step": 2175
},
{
"epoch": 0.69,
"grad_norm": 2.6653735637664795,
"learning_rate": 1.6272643536997237e-05,
"loss": 0.2085,
"step": 2200
},
{
"epoch": 0.7,
"grad_norm": 2.748147487640381,
"learning_rate": 1.5888854774332208e-05,
"loss": 0.1927,
"step": 2225
},
{
"epoch": 0.7,
"grad_norm": 1.3852734565734863,
"learning_rate": 1.550506601166718e-05,
"loss": 0.1563,
"step": 2250
},
{
"epoch": 0.71,
"grad_norm": 3.5002951622009277,
"learning_rate": 1.512127724900215e-05,
"loss": 0.1662,
"step": 2275
},
{
"epoch": 0.72,
"grad_norm": 0.5453472137451172,
"learning_rate": 1.473748848633712e-05,
"loss": 0.1789,
"step": 2300
},
{
"epoch": 0.73,
"grad_norm": 2.8527770042419434,
"learning_rate": 1.4353699723672092e-05,
"loss": 0.2245,
"step": 2325
},
{
"epoch": 0.74,
"grad_norm": 0.5989676713943481,
"learning_rate": 1.3969910961007064e-05,
"loss": 0.1695,
"step": 2350
},
{
"epoch": 0.74,
"grad_norm": 0.6295841932296753,
"learning_rate": 1.3586122198342033e-05,
"loss": 0.1611,
"step": 2375
},
{
"epoch": 0.75,
"grad_norm": 0.4394790232181549,
"learning_rate": 1.3202333435677003e-05,
"loss": 0.2185,
"step": 2400
},
{
"epoch": 0.76,
"grad_norm": 0.9019514322280884,
"learning_rate": 1.2818544673011976e-05,
"loss": 0.1679,
"step": 2425
},
{
"epoch": 0.77,
"grad_norm": 0.7013410329818726,
"learning_rate": 1.2434755910346946e-05,
"loss": 0.1752,
"step": 2450
},
{
"epoch": 0.78,
"grad_norm": 0.7528170347213745,
"learning_rate": 1.2050967147681916e-05,
"loss": 0.1999,
"step": 2475
},
{
"epoch": 0.78,
"grad_norm": 1.6549068689346313,
"learning_rate": 1.1667178385016887e-05,
"loss": 0.1954,
"step": 2500
},
{
"epoch": 0.79,
"grad_norm": 2.296861410140991,
"learning_rate": 1.1283389622351858e-05,
"loss": 0.1999,
"step": 2525
},
{
"epoch": 0.8,
"grad_norm": 3.1202523708343506,
"learning_rate": 1.0899600859686828e-05,
"loss": 0.2417,
"step": 2550
},
{
"epoch": 0.81,
"grad_norm": 1.385256052017212,
"learning_rate": 1.05158120970218e-05,
"loss": 0.1598,
"step": 2575
},
{
"epoch": 0.81,
"grad_norm": 1.5936309099197388,
"learning_rate": 1.0132023334356771e-05,
"loss": 0.1629,
"step": 2600
},
{
"epoch": 0.82,
"grad_norm": 1.5769296884536743,
"learning_rate": 9.748234571691742e-06,
"loss": 0.1823,
"step": 2625
},
{
"epoch": 0.83,
"grad_norm": 1.9773831367492676,
"learning_rate": 9.364445809026712e-06,
"loss": 0.1851,
"step": 2650
},
{
"epoch": 0.84,
"grad_norm": 2.044830799102783,
"learning_rate": 8.980657046361684e-06,
"loss": 0.1704,
"step": 2675
},
{
"epoch": 0.85,
"grad_norm": 0.4414065480232239,
"learning_rate": 8.596868283696653e-06,
"loss": 0.1756,
"step": 2700
},
{
"epoch": 0.85,
"grad_norm": 2.6521832942962646,
"learning_rate": 8.213079521031625e-06,
"loss": 0.1898,
"step": 2725
},
{
"epoch": 0.86,
"grad_norm": 0.5525100231170654,
"learning_rate": 7.829290758366594e-06,
"loss": 0.192,
"step": 2750
},
{
"epoch": 0.87,
"grad_norm": 2.2816734313964844,
"learning_rate": 7.445501995701566e-06,
"loss": 0.1852,
"step": 2775
},
{
"epoch": 0.88,
"grad_norm": 1.4024970531463623,
"learning_rate": 7.061713233036537e-06,
"loss": 0.2279,
"step": 2800
},
{
"epoch": 0.88,
"grad_norm": 2.59187912940979,
"learning_rate": 6.677924470371508e-06,
"loss": 0.195,
"step": 2825
},
{
"epoch": 0.89,
"grad_norm": 4.647696018218994,
"learning_rate": 6.294135707706479e-06,
"loss": 0.1785,
"step": 2850
},
{
"epoch": 0.9,
"grad_norm": 2.7766268253326416,
"learning_rate": 5.910346945041449e-06,
"loss": 0.188,
"step": 2875
},
{
"epoch": 0.91,
"grad_norm": 2.672715425491333,
"learning_rate": 5.52655818237642e-06,
"loss": 0.1467,
"step": 2900
},
{
"epoch": 0.92,
"grad_norm": 0.6805928945541382,
"learning_rate": 5.142769419711391e-06,
"loss": 0.1561,
"step": 2925
},
{
"epoch": 0.92,
"grad_norm": 2.8108584880828857,
"learning_rate": 4.7589806570463625e-06,
"loss": 0.189,
"step": 2950
},
{
"epoch": 0.93,
"grad_norm": 0.4544536769390106,
"learning_rate": 4.375191894381333e-06,
"loss": 0.1464,
"step": 2975
},
{
"epoch": 0.94,
"grad_norm": 0.4506062865257263,
"learning_rate": 3.991403131716304e-06,
"loss": 0.1533,
"step": 3000
},
{
"epoch": 0.95,
"grad_norm": 1.9615803956985474,
"learning_rate": 3.607614369051274e-06,
"loss": 0.184,
"step": 3025
},
{
"epoch": 0.96,
"grad_norm": 3.426203489303589,
"learning_rate": 3.223825606386245e-06,
"loss": 0.1744,
"step": 3050
},
{
"epoch": 0.96,
"grad_norm": 2.0022315979003906,
"learning_rate": 2.8400368437212158e-06,
"loss": 0.1839,
"step": 3075
},
{
"epoch": 0.97,
"grad_norm": 0.8066006302833557,
"learning_rate": 2.4562480810561868e-06,
"loss": 0.199,
"step": 3100
},
{
"epoch": 0.98,
"grad_norm": 2.2845537662506104,
"learning_rate": 2.0724593183911578e-06,
"loss": 0.1361,
"step": 3125
},
{
"epoch": 0.99,
"grad_norm": 0.5564154982566833,
"learning_rate": 1.6886705557261286e-06,
"loss": 0.217,
"step": 3150
},
{
"epoch": 0.99,
"grad_norm": 0.6154067516326904,
"learning_rate": 1.3048817930610991e-06,
"loss": 0.1631,
"step": 3175
},
{
"epoch": 1.0,
"grad_norm": 0.6170292496681213,
"learning_rate": 9.2109303039607e-07,
"loss": 0.1161,
"step": 3200
},
{
"epoch": 1.01,
"grad_norm": 3.1419057846069336,
"learning_rate": 5.373042677310408e-07,
"loss": 0.106,
"step": 3225
},
{
"epoch": 1.02,
"grad_norm": 0.4944257438182831,
"learning_rate": 1.5351550506601167e-07,
"loss": 0.1072,
"step": 3250
},
{
"epoch": 1.02,
"step": 3257,
"total_flos": 4.038329842708009e+18,
"train_loss": 0.2225519281828883,
"train_runtime": 2924.985,
"train_samples_per_second": 17.815,
"train_steps_per_second": 1.114
}
],
"logging_steps": 25,
"max_steps": 3257,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 100,
"total_flos": 4.038329842708009e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
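
The JSON above is the standard Transformers trainer state: each intermediate log_history entry records step, epoch, loss, learning_rate, and grad_norm, and the final entry carries the run summary (train_loss, train_runtime, total_flos, throughput). The snippet below is a minimal illustrative sketch, not part of the original file, showing how that structure can be read back with the Python standard library; the local path "trainer_state.json" is an assumption and should point at wherever this file is saved.

import json

# Load the trainer state exactly as serialized by the Transformers Trainer.
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Intermediate entries have a "loss" key; the final summary entry does not,
# so filtering on "loss" keeps only the per-step training logs.
steps, losses = [], []
for entry in state["log_history"]:
    if "loss" in entry:
        steps.append(entry["step"])
        losses.append(entry["loss"])

print(f"logged points: {len(steps)}")
print(f"first loss @ step {steps[0]}: {losses[0]}")
print(f"last loss  @ step {steps[-1]}: {losses[-1]}")
print(f"reported train_loss: {state['log_history'][-1].get('train_loss')}")

Run against this file, the script would report 130 logged points, a first loss of 0.5293 at step 25, a last loss of 0.1072 at step 3250, and the summarized train_loss of roughly 0.2226.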