{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 1022,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019569471624266144,
"grad_norm": 2.358055099231819,
"learning_rate": 6.451612903225806e-06,
"loss": 0.6115,
"step": 10
},
{
"epoch": 0.03913894324853229,
"grad_norm": 2.4319114197234546,
"learning_rate": 1.2903225806451613e-05,
"loss": 0.5445,
"step": 20
},
{
"epoch": 0.05870841487279843,
"grad_norm": 2.0017713808509323,
"learning_rate": 1.935483870967742e-05,
"loss": 0.5314,
"step": 30
},
{
"epoch": 0.07827788649706457,
"grad_norm": 2.135041689320543,
"learning_rate": 1.999593015378676e-05,
"loss": 0.5605,
"step": 40
},
{
"epoch": 0.09784735812133072,
"grad_norm": 1.886107920458415,
"learning_rate": 1.9981865802584795e-05,
"loss": 0.5519,
"step": 50
},
{
"epoch": 0.11741682974559686,
"grad_norm": 1.7545253281285733,
"learning_rate": 1.9957770831058518e-05,
"loss": 0.5715,
"step": 60
},
{
"epoch": 0.136986301369863,
"grad_norm": 1.9425827544859249,
"learning_rate": 1.9923669451866753e-05,
"loss": 0.5751,
"step": 70
},
{
"epoch": 0.15655577299412915,
"grad_norm": 1.9306565295676892,
"learning_rate": 1.987959593295039e-05,
"loss": 0.5686,
"step": 80
},
{
"epoch": 0.1761252446183953,
"grad_norm": 1.7564236049441155,
"learning_rate": 1.9825594563097043e-05,
"loss": 0.5782,
"step": 90
},
{
"epoch": 0.19569471624266144,
"grad_norm": 1.6232157228047726,
"learning_rate": 1.9761719607435973e-05,
"loss": 0.57,
"step": 100
},
{
"epoch": 0.21526418786692758,
"grad_norm": 1.8228974550233925,
"learning_rate": 1.9688035252907888e-05,
"loss": 0.568,
"step": 110
},
{
"epoch": 0.23483365949119372,
"grad_norm": 1.7488528306352211,
"learning_rate": 1.9604615543764506e-05,
"loss": 0.5719,
"step": 120
},
{
"epoch": 0.25440313111545987,
"grad_norm": 1.6679225084385316,
"learning_rate": 1.9511544307162656e-05,
"loss": 0.5661,
"step": 130
},
{
"epoch": 0.273972602739726,
"grad_norm": 1.5735195680219156,
"learning_rate": 1.9408915068927653e-05,
"loss": 0.5751,
"step": 140
},
{
"epoch": 0.29354207436399216,
"grad_norm": 1.881773729389861,
"learning_rate": 1.9296830959570697e-05,
"loss": 0.567,
"step": 150
},
{
"epoch": 0.3131115459882583,
"grad_norm": 1.669906241360482,
"learning_rate": 1.917540461065462e-05,
"loss": 0.5652,
"step": 160
},
{
"epoch": 0.33268101761252444,
"grad_norm": 1.4933017744550008,
"learning_rate": 1.9044758041612207e-05,
"loss": 0.5682,
"step": 170
},
{
"epoch": 0.3522504892367906,
"grad_norm": 1.7120959396931783,
"learning_rate": 1.8905022537130774e-05,
"loss": 0.571,
"step": 180
},
{
"epoch": 0.37181996086105673,
"grad_norm": 1.8277560782891744,
"learning_rate": 1.875633851522625e-05,
"loss": 0.5774,
"step": 190
},
{
"epoch": 0.3913894324853229,
"grad_norm": 1.4948184800706057,
"learning_rate": 1.859885538613932e-05,
"loss": 0.5694,
"step": 200
},
{
"epoch": 0.410958904109589,
"grad_norm": 1.5917802493859525,
"learning_rate": 1.843273140219541e-05,
"loss": 0.5697,
"step": 210
},
{
"epoch": 0.43052837573385516,
"grad_norm": 1.6561321000655207,
"learning_rate": 1.8258133498779407e-05,
"loss": 0.5664,
"step": 220
},
{
"epoch": 0.4500978473581213,
"grad_norm": 1.6233826437999508,
"learning_rate": 1.807523712658493e-05,
"loss": 0.5543,
"step": 230
},
{
"epoch": 0.46966731898238745,
"grad_norm": 1.5920461634982808,
"learning_rate": 1.7884226075306652e-05,
"loss": 0.5717,
"step": 240
},
{
"epoch": 0.4892367906066536,
"grad_norm": 1.5142935183933208,
"learning_rate": 1.768529228895294e-05,
"loss": 0.5511,
"step": 250
},
{
"epoch": 0.5088062622309197,
"grad_norm": 1.5778513000637655,
"learning_rate": 1.7478635672964324e-05,
"loss": 0.5704,
"step": 260
},
{
"epoch": 0.5283757338551859,
"grad_norm": 1.4360677799071841,
"learning_rate": 1.726446389333166e-05,
"loss": 0.55,
"step": 270
},
{
"epoch": 0.547945205479452,
"grad_norm": 1.7777152878321898,
"learning_rate": 1.7042992167915836e-05,
"loss": 0.5601,
"step": 280
},
{
"epoch": 0.5675146771037182,
"grad_norm": 1.6013006228828397,
"learning_rate": 1.6814443050178713e-05,
"loss": 0.5597,
"step": 290
},
{
"epoch": 0.5870841487279843,
"grad_norm": 1.522062257999709,
"learning_rate": 1.6579046205542656e-05,
"loss": 0.5425,
"step": 300
},
{
"epoch": 0.6066536203522505,
"grad_norm": 1.9444890864863704,
"learning_rate": 1.6337038180603332e-05,
"loss": 0.5588,
"step": 310
},
{
"epoch": 0.6262230919765166,
"grad_norm": 1.6256702214704648,
"learning_rate": 1.6088662165427767e-05,
"loss": 0.5656,
"step": 320
},
{
"epoch": 0.6457925636007827,
"grad_norm": 1.6987309599785507,
"learning_rate": 1.583416774917647e-05,
"loss": 0.5464,
"step": 330
},
{
"epoch": 0.6653620352250489,
"grad_norm": 1.536714185544068,
"learning_rate": 1.5573810669295176e-05,
"loss": 0.5548,
"step": 340
},
{
"epoch": 0.684931506849315,
"grad_norm": 1.4517361145984635,
"learning_rate": 1.5307852554528318e-05,
"loss": 0.5412,
"step": 350
},
{
"epoch": 0.7045009784735812,
"grad_norm": 1.379468851703595,
"learning_rate": 1.5036560662012405e-05,
"loss": 0.5548,
"step": 360
},
{
"epoch": 0.7240704500978473,
"grad_norm": 1.560556194482889,
"learning_rate": 1.4760207608713515e-05,
"loss": 0.5468,
"step": 370
},
{
"epoch": 0.7436399217221135,
"grad_norm": 1.5799605015754972,
"learning_rate": 1.4479071097478778e-05,
"loss": 0.5356,
"step": 380
},
{
"epoch": 0.7632093933463796,
"grad_norm": 1.490930077385001,
"learning_rate": 1.4193433637977165e-05,
"loss": 0.548,
"step": 390
},
{
"epoch": 0.7827788649706457,
"grad_norm": 1.493190089036989,
"learning_rate": 1.3903582262809918e-05,
"loss": 0.5452,
"step": 400
},
{
"epoch": 0.8023483365949119,
"grad_norm": 1.5176481135073754,
"learning_rate": 1.3609808239076025e-05,
"loss": 0.5514,
"step": 410
},
{
"epoch": 0.821917808219178,
"grad_norm": 1.8050366348817912,
"learning_rate": 1.3312406775682471e-05,
"loss": 0.5472,
"step": 420
},
{
"epoch": 0.8414872798434442,
"grad_norm": 1.5083842404171728,
"learning_rate": 1.3011676726693432e-05,
"loss": 0.5526,
"step": 430
},
{
"epoch": 0.8610567514677103,
"grad_norm": 1.4905244037584433,
"learning_rate": 1.2707920291016526e-05,
"loss": 0.535,
"step": 440
},
{
"epoch": 0.8806262230919765,
"grad_norm": 1.5693631315219312,
"learning_rate": 1.2401442708727869e-05,
"loss": 0.5338,
"step": 450
},
{
"epoch": 0.9001956947162426,
"grad_norm": 1.4345201206970122,
"learning_rate": 1.2092551954341104e-05,
"loss": 0.5371,
"step": 460
},
{
"epoch": 0.9197651663405088,
"grad_norm": 1.524988255690081,
"learning_rate": 1.1781558427328662e-05,
"loss": 0.5252,
"step": 470
},
{
"epoch": 0.9393346379647749,
"grad_norm": 1.4177625942380498,
"learning_rate": 1.146877464020618e-05,
"loss": 0.5225,
"step": 480
},
{
"epoch": 0.958904109589041,
"grad_norm": 1.4410916116833656,
"learning_rate": 1.1154514904493599e-05,
"loss": 0.5265,
"step": 490
},
{
"epoch": 0.9784735812133072,
"grad_norm": 1.455518822210514,
"learning_rate": 1.083909501486844e-05,
"loss": 0.5169,
"step": 500
},
{
"epoch": 0.9980430528375733,
"grad_norm": 1.4301843665722596,
"learning_rate": 1.0522831931828677e-05,
"loss": 0.5229,
"step": 510
},
{
"epoch": 1.0176125244618395,
"grad_norm": 1.8242107121619293,
"learning_rate": 1.0206043463184127e-05,
"loss": 0.2745,
"step": 520
},
{
"epoch": 1.0371819960861057,
"grad_norm": 1.4326923249244197,
"learning_rate": 9.889047944696354e-06,
"loss": 0.2286,
"step": 530
},
{
"epoch": 1.0567514677103718,
"grad_norm": 1.404291696764401,
"learning_rate": 9.57216392018806e-06,
"loss": 0.2274,
"step": 540
},
{
"epoch": 1.076320939334638,
"grad_norm": 1.4383258106098322,
"learning_rate": 9.255709821443399e-06,
"loss": 0.2158,
"step": 550
},
{
"epoch": 1.095890410958904,
"grad_norm": 1.5838369354979953,
"learning_rate": 8.940003648220863e-06,
"loss": 0.221,
"step": 560
},
{
"epoch": 1.1154598825831703,
"grad_norm": 1.3260950619639504,
"learning_rate": 8.625362648700332e-06,
"loss": 0.2146,
"step": 570
},
{
"epoch": 1.1350293542074363,
"grad_norm": 1.4928296689616758,
"learning_rate": 8.31210300068534e-06,
"loss": 0.2162,
"step": 580
},
{
"epoch": 1.1545988258317026,
"grad_norm": 1.2720796331424087,
"learning_rate": 8.000539493880972e-06,
"loss": 0.2137,
"step": 590
},
{
"epoch": 1.1741682974559686,
"grad_norm": 1.467401250476884,
"learning_rate": 7.69098521356662e-06,
"loss": 0.2075,
"step": 600
},
{
"epoch": 1.1937377690802349,
"grad_norm": 1.6529789418415561,
"learning_rate": 7.383751225981503e-06,
"loss": 0.2079,
"step": 610
},
{
"epoch": 1.213307240704501,
"grad_norm": 1.3912168807674363,
"learning_rate": 7.079146265739079e-06,
"loss": 0.1975,
"step": 620
},
{
"epoch": 1.2328767123287672,
"grad_norm": 1.2032682160188106,
"learning_rate": 6.777476425584486e-06,
"loss": 0.1982,
"step": 630
},
{
"epoch": 1.2524461839530332,
"grad_norm": 1.3762644242614392,
"learning_rate": 6.479044848806739e-06,
"loss": 0.1936,
"step": 640
},
{
"epoch": 1.2720156555772995,
"grad_norm": 1.4113607994624038,
"learning_rate": 6.184151424614795e-06,
"loss": 0.1972,
"step": 650
},
{
"epoch": 1.2915851272015655,
"grad_norm": 1.33710607093093,
"learning_rate": 5.893092486783594e-06,
"loss": 0.1959,
"step": 660
},
{
"epoch": 1.3111545988258317,
"grad_norm": 1.2851278655561311,
"learning_rate": 5.606160515872886e-06,
"loss": 0.1967,
"step": 670
},
{
"epoch": 1.3307240704500978,
"grad_norm": 1.354094894659071,
"learning_rate": 5.323643845318135e-06,
"loss": 0.1809,
"step": 680
},
{
"epoch": 1.350293542074364,
"grad_norm": 1.3589718977357292,
"learning_rate": 5.04582637168874e-06,
"loss": 0.1797,
"step": 690
},
{
"epoch": 1.36986301369863,
"grad_norm": 1.3320249853797883,
"learning_rate": 4.772987269404855e-06,
"loss": 0.1876,
"step": 700
},
{
"epoch": 1.3894324853228963,
"grad_norm": 1.255711002395159,
"learning_rate": 4.505400710199376e-06,
"loss": 0.1738,
"step": 710
},
{
"epoch": 1.4090019569471623,
"grad_norm": 1.3203552961467395,
"learning_rate": 4.243335587607074e-06,
"loss": 0.1731,
"step": 720
},
{
"epoch": 1.4285714285714286,
"grad_norm": 1.4388318733364938,
"learning_rate": 3.987055246757701e-06,
"loss": 0.1748,
"step": 730
},
{
"epoch": 1.4481409001956946,
"grad_norm": 1.1814268312854903,
"learning_rate": 3.7368172197446007e-06,
"loss": 0.1695,
"step": 740
},
{
"epoch": 1.467710371819961,
"grad_norm": 1.4097124495060538,
"learning_rate": 3.4928729668347616e-06,
"loss": 0.1685,
"step": 750
},
{
"epoch": 1.487279843444227,
"grad_norm": 1.378405688153761,
"learning_rate": 3.2554676237803117e-06,
"loss": 0.1684,
"step": 760
},
{
"epoch": 1.5068493150684932,
"grad_norm": 1.229111519346911,
"learning_rate": 3.0248397554854813e-06,
"loss": 0.1683,
"step": 770
},
{
"epoch": 1.5264187866927594,
"grad_norm": 1.3031304027710664,
"learning_rate": 2.801221116276436e-06,
"loss": 0.1655,
"step": 780
},
{
"epoch": 1.5459882583170255,
"grad_norm": 1.2785430784043106,
"learning_rate": 2.5848364170150307e-06,
"loss": 0.1648,
"step": 790
},
{
"epoch": 1.5655577299412915,
"grad_norm": 1.358399709979297,
"learning_rate": 2.375903099290362e-06,
"loss": 0.1597,
"step": 800
},
{
"epoch": 1.5851272015655578,
"grad_norm": 1.2024572922385197,
"learning_rate": 2.174631116915137e-06,
"loss": 0.1561,
"step": 810
},
{
"epoch": 1.604696673189824,
"grad_norm": 1.3460799576843692,
"learning_rate": 1.981222724946383e-06,
"loss": 0.1522,
"step": 820
},
{
"epoch": 1.62426614481409,
"grad_norm": 1.3491783502711767,
"learning_rate": 1.7958722764425119e-06,
"loss": 0.1536,
"step": 830
},
{
"epoch": 1.643835616438356,
"grad_norm": 1.3720639700420578,
"learning_rate": 1.6187660271609773e-06,
"loss": 0.147,
"step": 840
},
{
"epoch": 1.6634050880626223,
"grad_norm": 1.3633119421184674,
"learning_rate": 1.4500819483927898e-06,
"loss": 0.1481,
"step": 850
},
{
"epoch": 1.6829745596868886,
"grad_norm": 1.2542715045711834,
"learning_rate": 1.2899895481219672e-06,
"loss": 0.1509,
"step": 860
},
{
"epoch": 1.7025440313111546,
"grad_norm": 1.2321547326015214,
"learning_rate": 1.1386497006896058e-06,
"loss": 0.1431,
"step": 870
},
{
"epoch": 1.7221135029354206,
"grad_norm": 1.1527861182486887,
"learning_rate": 9.962144851337863e-07,
"loss": 0.1369,
"step": 880
},
{
"epoch": 1.741682974559687,
"grad_norm": 1.1491235254021608,
"learning_rate": 8.628270323677424e-07,
"loss": 0.1469,
"step": 890
},
{
"epoch": 1.7612524461839532,
"grad_norm": 1.4751342647673045,
"learning_rate": 7.386213813498344e-07,
"loss": 0.1533,
"step": 900
},
{
"epoch": 1.7808219178082192,
"grad_norm": 1.2401144134897077,
"learning_rate": 6.237223443899221e-07,
"loss": 0.1413,
"step": 910
},
{
"epoch": 1.8003913894324852,
"grad_norm": 1.1565076180160574,
"learning_rate": 5.18245381727418e-07,
"loss": 0.1448,
"step": 920
},
{
"epoch": 1.8199608610567515,
"grad_norm": 1.162280466003293,
"learning_rate": 4.222964855071154e-07,
"loss": 0.1398,
"step": 930
},
{
"epoch": 1.8395303326810177,
"grad_norm": 1.3768345558574515,
"learning_rate": 3.359720732693361e-07,
"loss": 0.1463,
"step": 940
},
{
"epoch": 1.8590998043052838,
"grad_norm": 1.2842826772954274,
"learning_rate": 2.5935889106146305e-07,
"loss": 0.1452,
"step": 950
},
{
"epoch": 1.8786692759295498,
"grad_norm": 1.27912475359879,
"learning_rate": 1.9253392626819468e-07,
"loss": 0.1409,
"step": 960
},
{
"epoch": 1.898238747553816,
"grad_norm": 1.2881607138075466,
"learning_rate": 1.3556433024813353e-07,
"loss": 0.1456,
"step": 970
},
{
"epoch": 1.9178082191780823,
"grad_norm": 1.1998515494235076,
"learning_rate": 8.850735085443763e-08,
"loss": 0.1456,
"step": 980
},
{
"epoch": 1.9373776908023483,
"grad_norm": 1.2190320104381343,
"learning_rate": 5.141027490735195e-08,
"loss": 0.1488,
"step": 990
},
{
"epoch": 1.9569471624266144,
"grad_norm": 1.1520725254335977,
"learning_rate": 2.431038067642111e-08,
"loss": 0.1424,
"step": 1000
},
{
"epoch": 1.9765166340508806,
"grad_norm": 1.1950165625346185,
"learning_rate": 7.234900420147739e-09,
"loss": 0.1372,
"step": 1010
},
{
"epoch": 1.9960861056751469,
"grad_norm": 1.124102075180773,
"learning_rate": 2.0099302071807658e-10,
"loss": 0.1414,
"step": 1020
},
{
"epoch": 2.0,
"step": 1022,
"total_flos": 183951682437120.0,
"train_loss": 0.36399914105577713,
"train_runtime": 9021.6815,
"train_samples_per_second": 14.477,
"train_steps_per_second": 0.113
}
],
"logging_steps": 10,
"max_steps": 1022,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 5000,
"total_flos": 183951682437120.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}