svmoe / trainer_state.json
{
"best_metric": 0.2237541824579239,
"best_model_checkpoint": "../../models/moe/moe/onlyRouterTrue_aveSVLossFalse_lr5e-03_ralc0.00/checkpoint-13692",
"epoch": 1.0,
"eval_steps": 3423,
"global_step": 13692,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010005842827928718,
"grad_norm": 3.4739856719970703,
"learning_rate": 4.9987649590704944e-05,
"loss": 0.6176,
"step": 137
},
{
"epoch": 0.020011685655857436,
"grad_norm": 2.701633930206299,
"learning_rate": 4.9950610565428546e-05,
"loss": 0.5809,
"step": 274
},
{
"epoch": 0.03001752848378615,
"grad_norm": 2.7198803424835205,
"learning_rate": 4.988891951994058e-05,
"loss": 0.572,
"step": 411
},
{
"epoch": 0.04002337131171487,
"grad_norm": 2.594041109085083,
"learning_rate": 4.9802637407013966e-05,
"loss": 0.5511,
"step": 548
},
{
"epoch": 0.05002921413964359,
"grad_norm": 2.6721420288085938,
"learning_rate": 4.969184947620146e-05,
"loss": 0.5502,
"step": 685
},
{
"epoch": 0.0600350569675723,
"grad_norm": 2.6376290321350098,
"learning_rate": 4.9556665189606316e-05,
"loss": 0.5266,
"step": 822
},
{
"epoch": 0.07004089979550102,
"grad_norm": 2.9528913497924805,
"learning_rate": 4.93972181137301e-05,
"loss": 0.536,
"step": 959
},
{
"epoch": 0.08004674262342974,
"grad_norm": 2.715660810470581,
"learning_rate": 4.9213665787504655e-05,
"loss": 0.5056,
"step": 1096
},
{
"epoch": 0.09005258545135846,
"grad_norm": 3.309116840362549,
"learning_rate": 4.900618956663845e-05,
"loss": 0.5124,
"step": 1233
},
{
"epoch": 0.10005842827928718,
"grad_norm": 2.226123332977295,
"learning_rate": 4.877499444443122e-05,
"loss": 0.5199,
"step": 1370
},
{
"epoch": 0.1100642711072159,
"grad_norm": 3.922006130218506,
"learning_rate": 4.852030884923388e-05,
"loss": 0.4957,
"step": 1507
},
{
"epoch": 0.1200701139351446,
"grad_norm": 3.2229082584381104,
"learning_rate": 4.82423844187538e-05,
"loss": 0.4864,
"step": 1644
},
{
"epoch": 0.13007595676307332,
"grad_norm": 1.936515212059021,
"learning_rate": 4.7941495751428536e-05,
"loss": 0.4653,
"step": 1781
},
{
"epoch": 0.14008179959100203,
"grad_norm": 2.966193199157715,
"learning_rate": 4.7617940135113606e-05,
"loss": 0.4722,
"step": 1918
},
{
"epoch": 0.15008764241893077,
"grad_norm": 3.2486863136291504,
"learning_rate": 4.7272037253352276e-05,
"loss": 0.4477,
"step": 2055
},
{
"epoch": 0.16009348524685948,
"grad_norm": 2.4895386695861816,
"learning_rate": 4.690412886951786e-05,
"loss": 0.4657,
"step": 2192
},
{
"epoch": 0.1700993280747882,
"grad_norm": 2.049923896789551,
"learning_rate": 4.651457848914021e-05,
"loss": 0.4538,
"step": 2329
},
{
"epoch": 0.1801051709027169,
"grad_norm": 2.0172815322875977,
"learning_rate": 4.610377100075045e-05,
"loss": 0.4317,
"step": 2466
},
{
"epoch": 0.19011101373064562,
"grad_norm": 2.2022132873535156,
"learning_rate": 4.5672112295598404e-05,
"loss": 0.4336,
"step": 2603
},
{
"epoch": 0.20011685655857436,
"grad_norm": 4.102416038513184,
"learning_rate": 4.5220028866618837e-05,
"loss": 0.4226,
"step": 2740
},
{
"epoch": 0.21012269938650308,
"grad_norm": 2.130418539047241,
"learning_rate": 4.4747967387042424e-05,
"loss": 0.4242,
"step": 2877
},
{
"epoch": 0.2201285422144318,
"grad_norm": 2.399773120880127,
"learning_rate": 4.4256394269067967e-05,
"loss": 0.4097,
"step": 3014
},
{
"epoch": 0.2301343850423605,
"grad_norm": 1.86017644405365,
"learning_rate": 4.3745795203031904e-05,
"loss": 0.4034,
"step": 3151
},
{
"epoch": 0.2401402278702892,
"grad_norm": 1.667731761932373,
"learning_rate": 4.321667467753034e-05,
"loss": 0.4108,
"step": 3288
},
{
"epoch": 0.25,
"eval_loss": 0.48173779249191284,
"eval_runtime": 367.5367,
"eval_samples_per_second": 31.393,
"eval_steps_per_second": 7.85,
"step": 3423
},
{
"epoch": 0.2501460706982179,
"grad_norm": 5.194522857666016,
"learning_rate": 4.2669555480967783e-05,
"loss": 0.3973,
"step": 3425
},
{
"epoch": 0.26015191352614664,
"grad_norm": 2.7280290126800537,
"learning_rate": 4.210497818502509e-05,
"loss": 0.409,
"step": 3562
},
{
"epoch": 0.27015775635407535,
"grad_norm": 2.3584136962890625,
"learning_rate": 4.152350061055695e-05,
"loss": 0.3855,
"step": 3699
},
{
"epoch": 0.28016359918200406,
"grad_norm": 3.4275765419006348,
"learning_rate": 4.092569727644661e-05,
"loss": 0.4029,
"step": 3836
},
{
"epoch": 0.29016944200993283,
"grad_norm": 2.14209246635437,
"learning_rate": 4.031215883196239e-05,
"loss": 0.3836,
"step": 3973
},
{
"epoch": 0.30017528483786154,
"grad_norm": 2.825669288635254,
"learning_rate": 3.968349147317693e-05,
"loss": 0.3842,
"step": 4110
},
{
"epoch": 0.31018112766579026,
"grad_norm": 2.3074870109558105,
"learning_rate": 3.904031634402552e-05,
"loss": 0.3601,
"step": 4247
},
{
"epoch": 0.32018697049371897,
"grad_norm": 2.8916587829589844,
"learning_rate": 3.838326892259564e-05,
"loss": 0.38,
"step": 4384
},
{
"epoch": 0.3301928133216477,
"grad_norm": 1.9778506755828857,
"learning_rate": 3.7712998393253786e-05,
"loss": 0.3611,
"step": 4521
},
{
"epoch": 0.3401986561495764,
"grad_norm": 2.735363721847534,
"learning_rate": 3.703016700522999e-05,
"loss": 0.3653,
"step": 4658
},
{
"epoch": 0.3502044989775051,
"grad_norm": 3.4953627586364746,
"learning_rate": 3.6335449418293985e-05,
"loss": 0.3754,
"step": 4795
},
{
"epoch": 0.3602103418054338,
"grad_norm": 4.839205265045166,
"learning_rate": 3.562953203616925e-05,
"loss": 0.3468,
"step": 4932
},
{
"epoch": 0.37021618463336253,
"grad_norm": 2.1916751861572266,
"learning_rate": 3.491311232834357e-05,
"loss": 0.3524,
"step": 5069
},
{
"epoch": 0.38022202746129125,
"grad_norm": 3.1856038570404053,
"learning_rate": 3.418689814094646e-05,
"loss": 0.3501,
"step": 5206
},
{
"epoch": 0.39022787028921996,
"grad_norm": 1.9597059488296509,
"learning_rate": 3.345160699737394e-05,
"loss": 0.3474,
"step": 5343
},
{
"epoch": 0.4002337131171487,
"grad_norm": 2.523240566253662,
"learning_rate": 3.2707965389351925e-05,
"loss": 0.3355,
"step": 5480
},
{
"epoch": 0.41023955594507744,
"grad_norm": 16.007722854614258,
"learning_rate": 3.195670805913866e-05,
"loss": 0.3285,
"step": 5617
},
{
"epoch": 0.42024539877300615,
"grad_norm": 4.493948936462402,
"learning_rate": 3.119857727357527e-05,
"loss": 0.3232,
"step": 5754
},
{
"epoch": 0.43025124160093486,
"grad_norm": 3.0784451961517334,
"learning_rate": 3.0434322090701827e-05,
"loss": 0.3264,
"step": 5891
},
{
"epoch": 0.4402570844288636,
"grad_norm": 2.9652538299560547,
"learning_rate": 2.9664697619663472e-05,
"loss": 0.3147,
"step": 6028
},
{
"epoch": 0.4502629272567923,
"grad_norm": 2.1190903186798096,
"learning_rate": 2.8890464274637876e-05,
"loss": 0.3096,
"step": 6165
},
{
"epoch": 0.460268770084721,
"grad_norm": 7.0982985496521,
"learning_rate": 2.8112387023521115e-05,
"loss": 0.2968,
"step": 6302
},
{
"epoch": 0.4702746129126497,
"grad_norm": 1.3744592666625977,
"learning_rate": 2.733123463211434e-05,
"loss": 0.3036,
"step": 6439
},
{
"epoch": 0.4802804557405784,
"grad_norm": 1.526167631149292,
"learning_rate": 2.6547778904558018e-05,
"loss": 0.3069,
"step": 6576
},
{
"epoch": 0.49028629856850714,
"grad_norm": 1.452222466468811,
"learning_rate": 2.5762793920764124e-05,
"loss": 0.2992,
"step": 6713
},
{
"epoch": 0.5,
"eval_loss": 0.3556436002254486,
"eval_runtime": 367.6162,
"eval_samples_per_second": 31.386,
"eval_steps_per_second": 7.848,
"step": 6846
},
{
"epoch": 0.5002921413964359,
"grad_norm": 2.3508574962615967,
"learning_rate": 2.4977055271599893e-05,
"loss": 0.2923,
"step": 6850
},
{
"epoch": 0.5102979842243646,
"grad_norm": 2.161405324935913,
"learning_rate": 2.4191339292578617e-05,
"loss": 0.2866,
"step": 6987
},
{
"epoch": 0.5203038270522933,
"grad_norm": 1.180467963218689,
"learning_rate": 2.340642229681474e-05,
"loss": 0.2874,
"step": 7124
},
{
"epoch": 0.530309669880222,
"grad_norm": 2.1960806846618652,
"learning_rate": 2.262307980800109e-05,
"loss": 0.2649,
"step": 7261
},
{
"epoch": 0.5403155127081507,
"grad_norm": 1.662766933441162,
"learning_rate": 2.1842085794166068e-05,
"loss": 0.268,
"step": 7398
},
{
"epoch": 0.5503213555360794,
"grad_norm": 1.6623494625091553,
"learning_rate": 2.1064211902967904e-05,
"loss": 0.2538,
"step": 7535
},
{
"epoch": 0.5603271983640081,
"grad_norm": 1.6007744073867798,
"learning_rate": 2.02902266992815e-05,
"loss": 0.2654,
"step": 7672
},
{
"epoch": 0.570333041191937,
"grad_norm": 2.0483551025390625,
"learning_rate": 1.9520894905831154e-05,
"loss": 0.2567,
"step": 7809
},
{
"epoch": 0.5803388840198657,
"grad_norm": 1.1370702981948853,
"learning_rate": 1.8756976647619502e-05,
"loss": 0.2558,
"step": 7946
},
{
"epoch": 0.5903447268477944,
"grad_norm": 1.3925893306732178,
"learning_rate": 1.7999226700899093e-05,
"loss": 0.239,
"step": 8083
},
{
"epoch": 0.6003505696757231,
"grad_norm": 1.1552609205245972,
"learning_rate": 1.7248393747428747e-05,
"loss": 0.2389,
"step": 8220
},
{
"epoch": 0.6103564125036518,
"grad_norm": 1.9827595949172974,
"learning_rate": 1.6505219634751472e-05,
"loss": 0.2439,
"step": 8357
},
{
"epoch": 0.6203622553315805,
"grad_norm": 1.7899774312973022,
"learning_rate": 1.5770438643224794e-05,
"loss": 0.2471,
"step": 8494
},
{
"epoch": 0.6303680981595092,
"grad_norm": 1.239159345626831,
"learning_rate": 1.5044776760527727e-05,
"loss": 0.2329,
"step": 8631
},
{
"epoch": 0.6403739409874379,
"grad_norm": 1.22818922996521,
"learning_rate": 1.4328950964361143e-05,
"loss": 0.2316,
"step": 8768
},
{
"epoch": 0.6503797838153667,
"grad_norm": 1.210986614227295,
"learning_rate": 1.3623668514050391e-05,
"loss": 0.2221,
"step": 8905
},
{
"epoch": 0.6603856266432954,
"grad_norm": 3.446089506149292,
"learning_rate": 1.2929626251749854e-05,
"loss": 0.2269,
"step": 9042
},
{
"epoch": 0.6703914694712241,
"grad_norm": 2.4413158893585205,
"learning_rate": 1.2247509913940128e-05,
"loss": 0.2209,
"step": 9179
},
{
"epoch": 0.6803973122991528,
"grad_norm": 1.9606636762619019,
"learning_rate": 1.1577993453897933e-05,
"loss": 0.221,
"step": 9316
},
{
"epoch": 0.6904031551270815,
"grad_norm": 0.8826603293418884,
"learning_rate": 1.0921738375808166e-05,
"loss": 0.2041,
"step": 9453
},
{
"epoch": 0.7004089979550102,
"grad_norm": 1.2856823205947876,
"learning_rate": 1.0279393081176122e-05,
"loss": 0.2022,
"step": 9590
},
{
"epoch": 0.7104148407829389,
"grad_norm": 0.5094468593597412,
"learning_rate": 9.651592228185622e-06,
"loss": 0.2023,
"step": 9727
},
{
"epoch": 0.7204206836108676,
"grad_norm": 1.4797954559326172,
"learning_rate": 9.038956104635871e-06,
"loss": 0.1961,
"step": 9864
},
{
"epoch": 0.7304265264387964,
"grad_norm": 1.850120186805725,
"learning_rate": 8.442090015076842e-06,
"loss": 0.2063,
"step": 10001
},
{
"epoch": 0.7404323692667251,
"grad_norm": 1.438025712966919,
"learning_rate": 7.861583682748586e-06,
"loss": 0.1903,
"step": 10138
},
{
"epoch": 0.75,
"eval_loss": 0.254038542509079,
"eval_runtime": 367.4989,
"eval_samples_per_second": 31.396,
"eval_steps_per_second": 7.85,
"step": 10269
},
{
"epoch": 0.7504382120946538,
"grad_norm": 1.2179102897644043,
"learning_rate": 7.298010666915303e-06,
"loss": 0.1885,
"step": 10275
},
{
"epoch": 0.7604440549225825,
"grad_norm": 1.099284291267395,
"learning_rate": 6.751927796170044e-06,
"loss": 0.2005,
"step": 10412
},
{
"epoch": 0.7704498977505112,
"grad_norm": 1.2008761167526245,
"learning_rate": 6.2238746182698375e-06,
"loss": 0.1702,
"step": 10549
},
{
"epoch": 0.7804557405784399,
"grad_norm": 1.5368109941482544,
"learning_rate": 5.7143728670448095e-06,
"loss": 0.1947,
"step": 10686
},
{
"epoch": 0.7904615834063686,
"grad_norm": 1.0241951942443848,
"learning_rate": 5.223925946908093e-06,
"loss": 0.1809,
"step": 10823
},
{
"epoch": 0.8004674262342975,
"grad_norm": 1.369369387626648,
"learning_rate": 4.7530184354757675e-06,
"loss": 0.1889,
"step": 10960
},
{
"epoch": 0.8104732690622262,
"grad_norm": 0.7070077061653137,
"learning_rate": 4.30211560478837e-06,
"loss": 0.1796,
"step": 11097
},
{
"epoch": 0.8204791118901549,
"grad_norm": 1.7389978170394897,
"learning_rate": 3.871662961606784e-06,
"loss": 0.1843,
"step": 11234
},
{
"epoch": 0.8304849547180836,
"grad_norm": 0.9540761709213257,
"learning_rate": 3.4620858072370504e-06,
"loss": 0.1764,
"step": 11371
},
{
"epoch": 0.8404907975460123,
"grad_norm": 1.6178034543991089,
"learning_rate": 3.073788817318707e-06,
"loss": 0.1785,
"step": 11508
},
{
"epoch": 0.850496640373941,
"grad_norm": 1.0763949155807495,
"learning_rate": 2.7071556419920514e-06,
"loss": 0.1715,
"step": 11645
},
{
"epoch": 0.8605024832018697,
"grad_norm": 1.0416216850280762,
"learning_rate": 2.3625485268391893e-06,
"loss": 0.1609,
"step": 11782
},
{
"epoch": 0.8705083260297984,
"grad_norm": 1.1292046308517456,
"learning_rate": 2.040307954973572e-06,
"loss": 0.1739,
"step": 11919
},
{
"epoch": 0.8805141688577272,
"grad_norm": 1.6674988269805908,
"learning_rate": 1.7407523106315244e-06,
"loss": 0.155,
"step": 12056
},
{
"epoch": 0.8905200116856559,
"grad_norm": 0.8487703800201416,
"learning_rate": 1.4641775645981849e-06,
"loss": 0.1687,
"step": 12193
},
{
"epoch": 0.9005258545135846,
"grad_norm": 0.9765934944152832,
"learning_rate": 1.210856981778688e-06,
"loss": 0.1561,
"step": 12330
},
{
"epoch": 0.9105316973415133,
"grad_norm": 0.9121004343032837,
"learning_rate": 9.810408512034908e-07,
"loss": 0.1621,
"step": 12467
},
{
"epoch": 0.920537540169442,
"grad_norm": 1.1236246824264526,
"learning_rate": 7.749562387346088e-07,
"loss": 0.1749,
"step": 12604
},
{
"epoch": 0.9305433829973707,
"grad_norm": 1.0239607095718384,
"learning_rate": 5.928067627171158e-07,
"loss": 0.165,
"step": 12741
},
{
"epoch": 0.9405492258252994,
"grad_norm": 1.127655029296875,
"learning_rate": 4.347723927975417e-07,
"loss": 0.1662,
"step": 12878
},
{
"epoch": 0.9505550686532281,
"grad_norm": 0.9149182438850403,
"learning_rate": 3.01009272107991e-07,
"loss": 0.164,
"step": 13015
},
{
"epoch": 0.9605609114811569,
"grad_norm": 1.0919889211654663,
"learning_rate": 1.9164956299158322e-07,
"loss": 0.1723,
"step": 13152
},
{
"epoch": 0.9705667543090856,
"grad_norm": 2.5338237285614014,
"learning_rate": 1.0680131642176183e-07,
"loss": 0.1584,
"step": 13289
},
{
"epoch": 0.9805725971370143,
"grad_norm": 0.7331089377403259,
"learning_rate": 4.6548365244375446e-08,
"loss": 0.17,
"step": 13426
},
{
"epoch": 0.990578439964943,
"grad_norm": 1.282274603843689,
"learning_rate": 1.0950241348084422e-08,
"loss": 0.1564,
"step": 13563
},
{
"epoch": 1.0,
"eval_loss": 0.2237541824579239,
"eval_runtime": 366.8443,
"eval_samples_per_second": 31.452,
"eval_steps_per_second": 7.864,
"step": 13692
},
{
"epoch": 1.0,
"step": 13692,
"total_flos": 2.405329205216464e+19,
"train_loss": 0.3085681435799271,
"train_runtime": 41531.0913,
"train_samples_per_second": 2.637,
"train_steps_per_second": 0.33
}
],
"logging_steps": 137,
"max_steps": 13692,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3423,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.405329205216464e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
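
Below is a minimal, illustrative Python sketch (not part of the uploaded trainer_state.json) showing how the log_history entries above could be read back with the standard library. It relies only on keys that actually appear in this file ("log_history", "step", "loss", "eval_loss", "best_metric", "global_step"); the filename passed to open() is an assumption about where the file sits locally.

import json

# Assumed local path to the file shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; evaluation entries carry "eval_loss".
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss {state['best_metric']:.4f} at step {state['global_step']}")
for step, loss in eval_points:
    print(f"step {step:>6}: eval_loss {loss:.4f}")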