{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.991533396048918,
"eval_steps": 500,
"global_step": 795,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"grad_norm": 2.453125,
"learning_rate": 2.5e-06,
"loss": 1.6646,
"step": 10
},
{
"epoch": 0.08,
"grad_norm": 2.71875,
"learning_rate": 5e-06,
"loss": 1.633,
"step": 20
},
{
"epoch": 0.11,
"grad_norm": 1.453125,
"learning_rate": 7.500000000000001e-06,
"loss": 1.5999,
"step": 30
},
{
"epoch": 0.15,
"grad_norm": 1.921875,
"learning_rate": 1e-05,
"loss": 1.6187,
"step": 40
},
{
"epoch": 0.19,
"grad_norm": 2.625,
"learning_rate": 1.25e-05,
"loss": 1.5918,
"step": 50
},
{
"epoch": 0.23,
"grad_norm": 0.59765625,
"learning_rate": 1.5000000000000002e-05,
"loss": 1.5936,
"step": 60
},
{
"epoch": 0.26,
"grad_norm": 2.015625,
"learning_rate": 1.7500000000000002e-05,
"loss": 1.5263,
"step": 70
},
{
"epoch": 0.3,
"grad_norm": 0.92578125,
"learning_rate": 2e-05,
"loss": 1.4784,
"step": 80
},
{
"epoch": 0.34,
"grad_norm": 0.474609375,
"learning_rate": 1.999034865600726e-05,
"loss": 1.3839,
"step": 90
},
{
"epoch": 0.38,
"grad_norm": 1.2109375,
"learning_rate": 1.9961413253717214e-05,
"loss": 1.3698,
"step": 100
},
{
"epoch": 0.41,
"grad_norm": 1.0390625,
"learning_rate": 1.9913249646234072e-05,
"loss": 1.2916,
"step": 110
},
{
"epoch": 0.45,
"grad_norm": 0.35546875,
"learning_rate": 1.9845950802266584e-05,
"loss": 1.2607,
"step": 120
},
{
"epoch": 0.49,
"grad_norm": 0.375,
"learning_rate": 1.9759646626673445e-05,
"loss": 1.2405,
"step": 130
},
{
"epoch": 0.53,
"grad_norm": 0.8828125,
"learning_rate": 1.9654503709711984e-05,
"loss": 1.2615,
"step": 140
},
{
"epoch": 0.56,
"grad_norm": 1.2421875,
"learning_rate": 1.9530725005474195e-05,
"loss": 1.1433,
"step": 150
},
{
"epoch": 0.6,
"grad_norm": 0.404296875,
"learning_rate": 1.93885494401308e-05,
"loss": 1.1599,
"step": 160
},
{
"epoch": 0.64,
"grad_norm": 0.91796875,
"learning_rate": 1.9228251450739495e-05,
"loss": 1.1325,
"step": 170
},
{
"epoch": 0.68,
"grad_norm": 0.55859375,
"learning_rate": 1.905014045550767e-05,
"loss": 1.0797,
"step": 180
},
{
"epoch": 0.71,
"grad_norm": 0.375,
"learning_rate": 1.8854560256532098e-05,
"loss": 1.041,
"step": 190
},
{
"epoch": 0.75,
"grad_norm": 1.2265625,
"learning_rate": 1.8641888376168483e-05,
"loss": 1.0286,
"step": 200
},
{
"epoch": 0.79,
"grad_norm": 0.287109375,
"learning_rate": 1.8412535328311813e-05,
"loss": 0.9831,
"step": 210
},
{
"epoch": 0.83,
"grad_norm": 0.28515625,
"learning_rate": 1.816694382599422e-05,
"loss": 0.9927,
"step": 220
},
{
"epoch": 0.87,
"grad_norm": 3.25,
"learning_rate": 1.7905587926829815e-05,
"loss": 1.0075,
"step": 230
},
{
"epoch": 0.9,
"grad_norm": 0.255859375,
"learning_rate": 1.762897211795607e-05,
"loss": 1.0232,
"step": 240
},
{
"epoch": 0.94,
"grad_norm": 0.6484375,
"learning_rate": 1.733763034223804e-05,
"loss": 0.9476,
"step": 250
},
{
"epoch": 0.98,
"grad_norm": 0.67578125,
"learning_rate": 1.7032124967615112e-05,
"loss": 0.9338,
"step": 260
},
{
"epoch": 1.02,
"grad_norm": 0.41796875,
"learning_rate": 1.6713045701579705e-05,
"loss": 0.9243,
"step": 270
},
{
"epoch": 1.05,
"grad_norm": 0.232421875,
"learning_rate": 1.638100845288331e-05,
"loss": 0.9084,
"step": 280
},
{
"epoch": 1.09,
"grad_norm": 0.349609375,
"learning_rate": 1.6036654142667043e-05,
"loss": 0.896,
"step": 290
},
{
"epoch": 1.13,
"grad_norm": 0.25390625,
"learning_rate": 1.568064746731156e-05,
"loss": 0.9311,
"step": 300
},
{
"epoch": 1.17,
"grad_norm": 0.236328125,
"learning_rate": 1.5313675615394373e-05,
"loss": 0.9182,
"step": 310
},
{
"epoch": 1.2,
"grad_norm": 0.326171875,
"learning_rate": 1.4936446941231186e-05,
"loss": 0.8967,
"step": 320
},
{
"epoch": 1.24,
"grad_norm": 0.2451171875,
"learning_rate": 1.4549689597561652e-05,
"loss": 0.8965,
"step": 330
},
{
"epoch": 1.28,
"grad_norm": 0.435546875,
"learning_rate": 1.4154150130018867e-05,
"loss": 0.9105,
"step": 340
},
{
"epoch": 1.32,
"grad_norm": 0.3359375,
"learning_rate": 1.375059203609562e-05,
"loss": 0.9089,
"step": 350
},
{
"epoch": 1.35,
"grad_norm": 0.2578125,
"learning_rate": 1.3339794291389015e-05,
"loss": 0.9066,
"step": 360
},
{
"epoch": 1.39,
"grad_norm": 0.271484375,
"learning_rate": 1.2922549845968174e-05,
"loss": 0.8878,
"step": 370
},
{
"epoch": 1.43,
"grad_norm": 0.30078125,
"learning_rate": 1.2499664093767458e-05,
"loss": 0.8909,
"step": 380
},
{
"epoch": 1.47,
"grad_norm": 2.8125,
"learning_rate": 1.2071953317959692e-05,
"loss": 0.8659,
"step": 390
},
{
"epoch": 1.51,
"grad_norm": 0.3125,
"learning_rate": 1.1640243115310219e-05,
"loss": 0.8685,
"step": 400
},
{
"epoch": 1.54,
"grad_norm": 0.216796875,
"learning_rate": 1.1205366802553231e-05,
"loss": 0.8959,
"step": 410
},
{
"epoch": 1.58,
"grad_norm": 0.310546875,
"learning_rate": 1.076816380786647e-05,
"loss": 0.9412,
"step": 420
},
{
"epoch": 1.62,
"grad_norm": 0.99609375,
"learning_rate": 1.0329478050549208e-05,
"loss": 0.8929,
"step": 430
},
{
"epoch": 1.66,
"grad_norm": 0.3359375,
"learning_rate": 9.890156312031165e-06,
"loss": 0.9102,
"step": 440
},
{
"epoch": 1.69,
"grad_norm": 0.37109375,
"learning_rate": 9.451046601356725e-06,
"loss": 0.8645,
"step": 450
},
{
"epoch": 1.73,
"grad_norm": 0.259765625,
"learning_rate": 9.012996518299547e-06,
"loss": 0.8554,
"step": 460
},
{
"epoch": 1.77,
"grad_norm": 0.314453125,
"learning_rate": 8.576851617267151e-06,
"loss": 0.9034,
"step": 470
},
{
"epoch": 1.81,
"grad_norm": 0.2001953125,
"learning_rate": 8.143453775153646e-06,
"loss": 0.8696,
"step": 480
},
{
"epoch": 1.84,
"grad_norm": 0.671875,
"learning_rate": 7.713639566291028e-06,
"loss": 0.8801,
"step": 490
},
{
"epoch": 1.88,
"grad_norm": 0.291015625,
"learning_rate": 7.2882386476358304e-06,
"loss": 0.8872,
"step": 500
},
{
"epoch": 1.92,
"grad_norm": 0.37109375,
"learning_rate": 6.868072157308213e-06,
"loss": 0.8586,
"step": 510
},
{
"epoch": 1.96,
"grad_norm": 0.7109375,
"learning_rate": 6.453951129574644e-06,
"loss": 0.8727,
"step": 520
},
{
"epoch": 1.99,
"grad_norm": 1.15625,
"learning_rate": 6.046674929333787e-06,
"loss": 0.8833,
"step": 530
},
{
"epoch": 2.03,
"grad_norm": 0.2431640625,
"learning_rate": 5.647029709127355e-06,
"loss": 0.9192,
"step": 540
},
{
"epoch": 2.07,
"grad_norm": 0.5859375,
"learning_rate": 5.2557868916543996e-06,
"loss": 0.9092,
"step": 550
},
{
"epoch": 2.11,
"grad_norm": 0.392578125,
"learning_rate": 4.873701680718146e-06,
"loss": 0.8563,
"step": 560
},
{
"epoch": 2.14,
"grad_norm": 0.28515625,
"learning_rate": 4.501511603479653e-06,
"loss": 0.8795,
"step": 570
},
{
"epoch": 2.18,
"grad_norm": 0.283203125,
"learning_rate": 4.13993508683214e-06,
"loss": 0.8673,
"step": 580
},
{
"epoch": 2.22,
"grad_norm": 0.283203125,
"learning_rate": 3.7896700706439826e-06,
"loss": 0.8649,
"step": 590
},
{
"epoch": 2.26,
"grad_norm": 0.40625,
"learning_rate": 3.4513926605471504e-06,
"loss": 0.8592,
"step": 600
},
{
"epoch": 2.3,
"grad_norm": 0.2490234375,
"learning_rate": 3.125755822871607e-06,
"loss": 0.8603,
"step": 610
},
{
"epoch": 2.33,
"grad_norm": 0.23828125,
"learning_rate": 2.813388124244778e-06,
"loss": 0.8614,
"step": 620
},
{
"epoch": 2.37,
"grad_norm": 0.251953125,
"learning_rate": 2.514892518288988e-06,
"loss": 0.8799,
"step": 630
},
{
"epoch": 2.41,
"grad_norm": 0.27734375,
"learning_rate": 2.230845181758928e-06,
"loss": 0.8356,
"step": 640
},
{
"epoch": 2.45,
"grad_norm": 0.453125,
"learning_rate": 1.961794402365611e-06,
"loss": 0.8888,
"step": 650
},
{
"epoch": 2.48,
"grad_norm": 0.2470703125,
"learning_rate": 1.7082595204337183e-06,
"loss": 0.8796,
"step": 660
},
{
"epoch": 2.52,
"grad_norm": 0.482421875,
"learning_rate": 1.4707299264351914e-06,
"loss": 0.8694,
"step": 670
},
{
"epoch": 2.56,
"grad_norm": 0.259765625,
"learning_rate": 1.2496641163340562e-06,
"loss": 0.8944,
"step": 680
},
{
"epoch": 2.6,
"grad_norm": 0.51953125,
"learning_rate": 1.0454888065659775e-06,
"loss": 0.8564,
"step": 690
},
{
"epoch": 2.63,
"grad_norm": 0.2265625,
"learning_rate": 8.585981103608343e-07,
"loss": 0.8361,
"step": 700
},
{
"epoch": 2.67,
"grad_norm": 0.23828125,
"learning_rate": 6.8935277699825e-07,
"loss": 0.869,
"step": 710
},
{
"epoch": 2.71,
"grad_norm": 0.2294921875,
"learning_rate": 5.380794954645141e-07,
"loss": 0.8573,
"step": 720
},
{
"epoch": 2.75,
"grad_norm": 0.35546875,
"learning_rate": 4.0507026385502747e-07,
"loss": 0.8439,
"step": 730
},
{
"epoch": 2.78,
"grad_norm": 0.216796875,
"learning_rate": 2.905818257394799e-07,
"loss": 0.9043,
"step": 740
},
{
"epoch": 2.82,
"grad_norm": 0.2177734375,
"learning_rate": 1.9483517457776436e-07,
"loss": 0.8677,
"step": 750
},
{
"epoch": 2.86,
"grad_norm": 0.3203125,
"learning_rate": 1.1801512714318286e-07,
"loss": 0.8762,
"step": 760
},
{
"epoch": 2.9,
"grad_norm": 0.390625,
"learning_rate": 6.026996677640062e-08,
"loss": 0.8791,
"step": 770
},
{
"epoch": 2.94,
"grad_norm": 0.322265625,
"learning_rate": 2.171115715874139e-08,
"loss": 0.91,
"step": 780
},
{
"epoch": 2.97,
"grad_norm": 0.212890625,
"learning_rate": 2.4131271573191172e-09,
"loss": 0.8937,
"step": 790
}
],
"logging_steps": 10,
"max_steps": 795,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 1.5541346535840154e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}