btx/trainer_state.json
{
"best_metric": 0.21310460567474365,
"best_model_checkpoint": "../../models/baseline/btx/checkpoint-13692",
"epoch": 1.0,
"eval_steps": 3423,
"global_step": 13692,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010005842827928718,
"grad_norm": 2.8703060150146484,
"learning_rate": 4.9987649590704944e-05,
"loss": 0.7257,
"step": 137
},
{
"epoch": 0.020011685655857436,
"grad_norm": 2.461472988128662,
"learning_rate": 4.9950610565428546e-05,
"loss": 0.6506,
"step": 274
},
{
"epoch": 0.03001752848378615,
"grad_norm": 2.306278944015503,
"learning_rate": 4.988891951994058e-05,
"loss": 0.6267,
"step": 411
},
{
"epoch": 0.04002337131171487,
"grad_norm": 2.7277510166168213,
"learning_rate": 4.9802637407013966e-05,
"loss": 0.5933,
"step": 548
},
{
"epoch": 0.05002921413964359,
"grad_norm": 2.524292230606079,
"learning_rate": 4.969184947620146e-05,
"loss": 0.5853,
"step": 685
},
{
"epoch": 0.0600350569675723,
"grad_norm": 2.5938446521759033,
"learning_rate": 4.9556665189606316e-05,
"loss": 0.5486,
"step": 822
},
{
"epoch": 0.07004089979550102,
"grad_norm": 2.528536796569824,
"learning_rate": 4.93972181137301e-05,
"loss": 0.5527,
"step": 959
},
{
"epoch": 0.08004674262342974,
"grad_norm": 2.1125128269195557,
"learning_rate": 4.9213665787504655e-05,
"loss": 0.5129,
"step": 1096
},
{
"epoch": 0.09005258545135846,
"grad_norm": 2.1105308532714844,
"learning_rate": 4.900618956663845e-05,
"loss": 0.5193,
"step": 1233
},
{
"epoch": 0.10005842827928718,
"grad_norm": 1.8278461694717407,
"learning_rate": 4.877499444443122e-05,
"loss": 0.5207,
"step": 1370
},
{
"epoch": 0.1100642711072159,
"grad_norm": 4.437809944152832,
"learning_rate": 4.852030884923388e-05,
"loss": 0.4967,
"step": 1507
},
{
"epoch": 0.1200701139351446,
"grad_norm": 2.0539824962615967,
"learning_rate": 4.82423844187538e-05,
"loss": 0.4825,
"step": 1644
},
{
"epoch": 0.13007595676307332,
"grad_norm": 1.672153115272522,
"learning_rate": 4.7941495751428536e-05,
"loss": 0.4648,
"step": 1781
},
{
"epoch": 0.14008179959100203,
"grad_norm": 1.9463084936141968,
"learning_rate": 4.7617940135113606e-05,
"loss": 0.4623,
"step": 1918
},
{
"epoch": 0.15008764241893077,
"grad_norm": 1.61201810836792,
"learning_rate": 4.7272037253352276e-05,
"loss": 0.4406,
"step": 2055
},
{
"epoch": 0.16009348524685948,
"grad_norm": 1.998997449874878,
"learning_rate": 4.690412886951786e-05,
"loss": 0.4563,
"step": 2192
},
{
"epoch": 0.1700993280747882,
"grad_norm": 1.4533461332321167,
"learning_rate": 4.651457848914021e-05,
"loss": 0.4449,
"step": 2329
},
{
"epoch": 0.1801051709027169,
"grad_norm": 1.3883270025253296,
"learning_rate": 4.610377100075045e-05,
"loss": 0.4186,
"step": 2466
},
{
"epoch": 0.19011101373064562,
"grad_norm": 1.6303733587265015,
"learning_rate": 4.5672112295598404e-05,
"loss": 0.42,
"step": 2603
},
{
"epoch": 0.20011685655857436,
"grad_norm": 2.2687270641326904,
"learning_rate": 4.5220028866618837e-05,
"loss": 0.4063,
"step": 2740
},
{
"epoch": 0.21012269938650308,
"grad_norm": 1.6023283004760742,
"learning_rate": 4.4747967387042424e-05,
"loss": 0.4037,
"step": 2877
},
{
"epoch": 0.2201285422144318,
"grad_norm": 1.6843392848968506,
"learning_rate": 4.4256394269067967e-05,
"loss": 0.3942,
"step": 3014
},
{
"epoch": 0.2301343850423605,
"grad_norm": 1.3607641458511353,
"learning_rate": 4.3745795203031904e-05,
"loss": 0.3898,
"step": 3151
},
{
"epoch": 0.2401402278702892,
"grad_norm": 1.3324497938156128,
"learning_rate": 4.321667467753034e-05,
"loss": 0.3937,
"step": 3288
},
{
"epoch": 0.25,
"eval_loss": 0.4498611092567444,
"eval_runtime": 365.3631,
"eval_samples_per_second": 31.58,
"eval_steps_per_second": 7.896,
"step": 3423
},
{
"epoch": 0.2501460706982179,
"grad_norm": 2.132169723510742,
"learning_rate": 4.2669555480967783e-05,
"loss": 0.3771,
"step": 3425
},
{
"epoch": 0.26015191352614664,
"grad_norm": 1.718522071838379,
"learning_rate": 4.210497818502509e-05,
"loss": 0.3865,
"step": 3562
},
{
"epoch": 0.27015775635407535,
"grad_norm": 1.6622902154922485,
"learning_rate": 4.152350061055695e-05,
"loss": 0.3621,
"step": 3699
},
{
"epoch": 0.28016359918200406,
"grad_norm": 1.836055040359497,
"learning_rate": 4.092569727644661e-05,
"loss": 0.3764,
"step": 3836
},
{
"epoch": 0.29016944200993283,
"grad_norm": 1.3349945545196533,
"learning_rate": 4.031215883196239e-05,
"loss": 0.3546,
"step": 3973
},
{
"epoch": 0.30017528483786154,
"grad_norm": 1.3506925106048584,
"learning_rate": 3.968349147317693e-05,
"loss": 0.3551,
"step": 4110
},
{
"epoch": 0.31018112766579026,
"grad_norm": 1.663672685623169,
"learning_rate": 3.904031634402552e-05,
"loss": 0.329,
"step": 4247
},
{
"epoch": 0.32018697049371897,
"grad_norm": 1.5420622825622559,
"learning_rate": 3.838326892259564e-05,
"loss": 0.3477,
"step": 4384
},
{
"epoch": 0.3301928133216477,
"grad_norm": 1.14065682888031,
"learning_rate": 3.7712998393253786e-05,
"loss": 0.3311,
"step": 4521
},
{
"epoch": 0.3401986561495764,
"grad_norm": 2.2005677223205566,
"learning_rate": 3.703016700522999e-05,
"loss": 0.3329,
"step": 4658
},
{
"epoch": 0.3502044989775051,
"grad_norm": 1.666883111000061,
"learning_rate": 3.6335449418293985e-05,
"loss": 0.3398,
"step": 4795
},
{
"epoch": 0.3602103418054338,
"grad_norm": 1.7949641942977905,
"learning_rate": 3.562953203616925e-05,
"loss": 0.3155,
"step": 4932
},
{
"epoch": 0.37021618463336253,
"grad_norm": 1.5265356302261353,
"learning_rate": 3.491311232834357e-05,
"loss": 0.3216,
"step": 5069
},
{
"epoch": 0.38022202746129125,
"grad_norm": 1.8440781831741333,
"learning_rate": 3.418689814094646e-05,
"loss": 0.3154,
"step": 5206
},
{
"epoch": 0.39022787028921996,
"grad_norm": 1.4105604887008667,
"learning_rate": 3.345160699737394e-05,
"loss": 0.3141,
"step": 5343
},
{
"epoch": 0.4002337131171487,
"grad_norm": 1.1201190948486328,
"learning_rate": 3.2707965389351925e-05,
"loss": 0.303,
"step": 5480
},
{
"epoch": 0.41023955594507744,
"grad_norm": 1.8358474969863892,
"learning_rate": 3.195670805913866e-05,
"loss": 0.3042,
"step": 5617
},
{
"epoch": 0.42024539877300615,
"grad_norm": 1.7674305438995361,
"learning_rate": 3.119857727357527e-05,
"loss": 0.2908,
"step": 5754
},
{
"epoch": 0.43025124160093486,
"grad_norm": 1.841166377067566,
"learning_rate": 3.0434322090701827e-05,
"loss": 0.2923,
"step": 5891
},
{
"epoch": 0.4402570844288636,
"grad_norm": 1.6789052486419678,
"learning_rate": 2.9664697619663472e-05,
"loss": 0.2791,
"step": 6028
},
{
"epoch": 0.4502629272567923,
"grad_norm": 1.2760952711105347,
"learning_rate": 2.8890464274637876e-05,
"loss": 0.2749,
"step": 6165
},
{
"epoch": 0.460268770084721,
"grad_norm": 1.3617960214614868,
"learning_rate": 2.8112387023521115e-05,
"loss": 0.2715,
"step": 6302
},
{
"epoch": 0.4702746129126497,
"grad_norm": 1.0967957973480225,
"learning_rate": 2.733123463211434e-05,
"loss": 0.2748,
"step": 6439
},
{
"epoch": 0.4802804557405784,
"grad_norm": 0.9176567196846008,
"learning_rate": 2.6547778904558018e-05,
"loss": 0.2747,
"step": 6576
},
{
"epoch": 0.49028629856850714,
"grad_norm": 0.9466305375099182,
"learning_rate": 2.5762793920764124e-05,
"loss": 0.2679,
"step": 6713
},
{
"epoch": 0.5,
"eval_loss": 0.31969591975212097,
"eval_runtime": 364.3381,
"eval_samples_per_second": 31.668,
"eval_steps_per_second": 7.918,
"step": 6846
},
{
"epoch": 0.5002921413964359,
"grad_norm": 1.4617935419082642,
"learning_rate": 2.4977055271599893e-05,
"loss": 0.2643,
"step": 6850
},
{
"epoch": 0.5102979842243646,
"grad_norm": 1.6485743522644043,
"learning_rate": 2.4191339292578617e-05,
"loss": 0.2605,
"step": 6987
},
{
"epoch": 0.5203038270522933,
"grad_norm": 1.0177462100982666,
"learning_rate": 2.340642229681474e-05,
"loss": 0.2597,
"step": 7124
},
{
"epoch": 0.530309669880222,
"grad_norm": 1.5512287616729736,
"learning_rate": 2.262307980800109e-05,
"loss": 0.2427,
"step": 7261
},
{
"epoch": 0.5403155127081507,
"grad_norm": 1.314630150794983,
"learning_rate": 2.1842085794166068e-05,
"loss": 0.239,
"step": 7398
},
{
"epoch": 0.5503213555360794,
"grad_norm": 1.3187059164047241,
"learning_rate": 2.1064211902967904e-05,
"loss": 0.2287,
"step": 7535
},
{
"epoch": 0.5603271983640081,
"grad_norm": 1.2602145671844482,
"learning_rate": 2.02902266992815e-05,
"loss": 0.2381,
"step": 7672
},
{
"epoch": 0.570333041191937,
"grad_norm": 1.5069787502288818,
"learning_rate": 1.9520894905831154e-05,
"loss": 0.2324,
"step": 7809
},
{
"epoch": 0.5803388840198657,
"grad_norm": 0.8896231651306152,
"learning_rate": 1.8756976647619502e-05,
"loss": 0.2292,
"step": 7946
},
{
"epoch": 0.5903447268477944,
"grad_norm": 0.9988867044448853,
"learning_rate": 1.7999226700899093e-05,
"loss": 0.2203,
"step": 8083
},
{
"epoch": 0.6003505696757231,
"grad_norm": 0.9557806253433228,
"learning_rate": 1.7248393747428747e-05,
"loss": 0.2149,
"step": 8220
},
{
"epoch": 0.6103564125036518,
"grad_norm": 1.0728305578231812,
"learning_rate": 1.6505219634751472e-05,
"loss": 0.2218,
"step": 8357
},
{
"epoch": 0.6203622553315805,
"grad_norm": 1.2526644468307495,
"learning_rate": 1.5770438643224794e-05,
"loss": 0.2266,
"step": 8494
},
{
"epoch": 0.6303680981595092,
"grad_norm": 1.0386145114898682,
"learning_rate": 1.5044776760527727e-05,
"loss": 0.2163,
"step": 8631
},
{
"epoch": 0.6403739409874379,
"grad_norm": 1.0314420461654663,
"learning_rate": 1.4328950964361143e-05,
"loss": 0.2114,
"step": 8768
},
{
"epoch": 0.6503797838153667,
"grad_norm": 0.5962662100791931,
"learning_rate": 1.3623668514050391e-05,
"loss": 0.2049,
"step": 8905
},
{
"epoch": 0.6603856266432954,
"grad_norm": 0.6801035404205322,
"learning_rate": 1.2929626251749854e-05,
"loss": 0.2085,
"step": 9042
},
{
"epoch": 0.6703914694712241,
"grad_norm": 1.8030842542648315,
"learning_rate": 1.2247509913940128e-05,
"loss": 0.2045,
"step": 9179
},
{
"epoch": 0.6803973122991528,
"grad_norm": 1.7659096717834473,
"learning_rate": 1.1577993453897933e-05,
"loss": 0.2069,
"step": 9316
},
{
"epoch": 0.6904031551270815,
"grad_norm": 0.6933948397636414,
"learning_rate": 1.0921738375808166e-05,
"loss": 0.1892,
"step": 9453
},
{
"epoch": 0.7004089979550102,
"grad_norm": 1.0741862058639526,
"learning_rate": 1.0279393081176122e-05,
"loss": 0.1893,
"step": 9590
},
{
"epoch": 0.7104148407829389,
"grad_norm": 0.31156471371650696,
"learning_rate": 9.651592228185622e-06,
"loss": 0.1894,
"step": 9727
},
{
"epoch": 0.7204206836108676,
"grad_norm": 1.0299690961837769,
"learning_rate": 9.038956104635871e-06,
"loss": 0.1844,
"step": 9864
},
{
"epoch": 0.7304265264387964,
"grad_norm": 0.8893802165985107,
"learning_rate": 8.442090015076842e-06,
"loss": 0.1952,
"step": 10001
},
{
"epoch": 0.7404323692667251,
"grad_norm": 1.3043843507766724,
"learning_rate": 7.861583682748586e-06,
"loss": 0.1804,
"step": 10138
},
{
"epoch": 0.75,
"eval_loss": 0.23675276339054108,
"eval_runtime": 365.1866,
"eval_samples_per_second": 31.595,
"eval_steps_per_second": 7.9,
"step": 10269
},
{
"epoch": 0.7504382120946538,
"grad_norm": 1.0938574075698853,
"learning_rate": 7.298010666915303e-06,
"loss": 0.1777,
"step": 10275
},
{
"epoch": 0.7604440549225825,
"grad_norm": 0.9465618133544922,
"learning_rate": 6.751927796170044e-06,
"loss": 0.1902,
"step": 10412
},
{
"epoch": 0.7704498977505112,
"grad_norm": 1.0503722429275513,
"learning_rate": 6.2238746182698375e-06,
"loss": 0.161,
"step": 10549
},
{
"epoch": 0.7804557405784399,
"grad_norm": 1.2977089881896973,
"learning_rate": 5.7143728670448095e-06,
"loss": 0.1817,
"step": 10686
},
{
"epoch": 0.7904615834063686,
"grad_norm": 0.7178501486778259,
"learning_rate": 5.223925946908093e-06,
"loss": 0.1715,
"step": 10823
},
{
"epoch": 0.8004674262342975,
"grad_norm": 1.1066858768463135,
"learning_rate": 4.7530184354757675e-06,
"loss": 0.1795,
"step": 10960
},
{
"epoch": 0.8104732690622262,
"grad_norm": 0.5346119999885559,
"learning_rate": 4.30211560478837e-06,
"loss": 0.1716,
"step": 11097
},
{
"epoch": 0.8204791118901549,
"grad_norm": 1.6099894046783447,
"learning_rate": 3.871662961606784e-06,
"loss": 0.1758,
"step": 11234
},
{
"epoch": 0.8304849547180836,
"grad_norm": 0.7507832050323486,
"learning_rate": 3.4620858072370504e-06,
"loss": 0.1706,
"step": 11371
},
{
"epoch": 0.8404907975460123,
"grad_norm": 1.2874454259872437,
"learning_rate": 3.073788817318707e-06,
"loss": 0.1713,
"step": 11508
},
{
"epoch": 0.850496640373941,
"grad_norm": 0.863970935344696,
"learning_rate": 2.7071556419920514e-06,
"loss": 0.1637,
"step": 11645
},
{
"epoch": 0.8605024832018697,
"grad_norm": 0.841557502746582,
"learning_rate": 2.3625485268391893e-06,
"loss": 0.1544,
"step": 11782
},
{
"epoch": 0.8705083260297984,
"grad_norm": 0.9113953113555908,
"learning_rate": 2.040307954973572e-06,
"loss": 0.1642,
"step": 11919
},
{
"epoch": 0.8805141688577272,
"grad_norm": 1.1008964776992798,
"learning_rate": 1.7407523106315244e-06,
"loss": 0.1501,
"step": 12056
},
{
"epoch": 0.8905200116856559,
"grad_norm": 0.7368434071540833,
"learning_rate": 1.4641775645981849e-06,
"loss": 0.1617,
"step": 12193
},
{
"epoch": 0.9005258545135846,
"grad_norm": 0.8009047508239746,
"learning_rate": 1.210856981778688e-06,
"loss": 0.1498,
"step": 12330
},
{
"epoch": 0.9105316973415133,
"grad_norm": 0.7741762399673462,
"learning_rate": 9.810408512034908e-07,
"loss": 0.1567,
"step": 12467
},
{
"epoch": 0.920537540169442,
"grad_norm": 0.5345299243927002,
"learning_rate": 7.749562387346088e-07,
"loss": 0.1668,
"step": 12604
},
{
"epoch": 0.9305433829973707,
"grad_norm": 0.8907518982887268,
"learning_rate": 5.928067627171158e-07,
"loss": 0.159,
"step": 12741
},
{
"epoch": 0.9405492258252994,
"grad_norm": 1.0891618728637695,
"learning_rate": 4.347723927975417e-07,
"loss": 0.1615,
"step": 12878
},
{
"epoch": 0.9505550686532281,
"grad_norm": 0.5715863108634949,
"learning_rate": 3.01009272107991e-07,
"loss": 0.1568,
"step": 13015
},
{
"epoch": 0.9605609114811569,
"grad_norm": 0.8792664408683777,
"learning_rate": 1.9164956299158322e-07,
"loss": 0.1656,
"step": 13152
},
{
"epoch": 0.9705667543090856,
"grad_norm": 1.4301509857177734,
"learning_rate": 1.0680131642176183e-07,
"loss": 0.1528,
"step": 13289
},
{
"epoch": 0.9805725971370143,
"grad_norm": 0.6064629554748535,
"learning_rate": 4.6548365244375446e-08,
"loss": 0.1628,
"step": 13426
},
{
"epoch": 0.990578439964943,
"grad_norm": 0.9063898921012878,
"learning_rate": 1.0950241348084422e-08,
"loss": 0.1529,
"step": 13563
},
{
"epoch": 1.0,
"eval_loss": 0.21310460567474365,
"eval_runtime": 364.3533,
"eval_samples_per_second": 31.667,
"eval_steps_per_second": 7.918,
"step": 13692
},
{
"epoch": 1.0,
"step": 13692,
"total_flos": 2.405329205216464e+19,
"train_loss": 0.29627383469211893,
"train_runtime": 43035.7563,
"train_samples_per_second": 2.545,
"train_steps_per_second": 0.318
}
],
"logging_steps": 137,
"max_steps": 13692,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3423,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.405329205216464e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
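
The JSON above is the standard `trainer_state.json` that the Hugging Face `Trainer` writes next to its checkpoints. Below is a minimal sketch of how one might inspect it; the file path `btx/trainer_state.json` is an assumption based on this repo's folder layout, while the key names (`log_history`, `loss`, `eval_loss`, `step`, `best_metric`, `best_model_checkpoint`) are taken directly from the state shown above.

```python
import json

# Load the trainer state dumped by the Hugging Face Trainer.
# The path is an assumption based on this repository's layout.
with open("btx/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training-loss records, evaluation records, and a final
# summary record; split out the first two kinds by the keys they carry.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(eval_loss={state['best_metric']:.4f})")

# Training loss, logged every `logging_steps` (137) optimizer steps.
for e in train_logs:
    print(f"step {e['step']:>6}  epoch {e['epoch']:.3f}  loss {e['loss']:.4f}")

# Evaluation loss, computed every `eval_steps` (3423) steps.
for e in eval_logs:
    print(f"eval @ step {e['step']:>6}  eval_loss {e['eval_loss']:.4f}")
```

The same split could feed a plotting library to draw the training and evaluation loss curves; only the key names above are relied on, nothing model-specific.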