{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.04992511233150274,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006656681644200366,
"grad_norm": 4.565742492675781,
"learning_rate": 6.666666666666667e-05,
"loss": 5.2699,
"step": 1
},
{
"epoch": 0.0006656681644200366,
"eval_loss": 5.414046287536621,
"eval_runtime": 292.7399,
"eval_samples_per_second": 4.321,
"eval_steps_per_second": 2.162,
"step": 1
},
{
"epoch": 0.0013313363288400732,
"grad_norm": 5.261593818664551,
"learning_rate": 0.00013333333333333334,
"loss": 5.5603,
"step": 2
},
{
"epoch": 0.00199700449326011,
"grad_norm": 5.0060577392578125,
"learning_rate": 0.0002,
"loss": 5.3154,
"step": 3
},
{
"epoch": 0.0026626726576801465,
"grad_norm": 4.290820121765137,
"learning_rate": 0.0001999048221581858,
"loss": 4.7453,
"step": 4
},
{
"epoch": 0.003328340822100183,
"grad_norm": 4.488643169403076,
"learning_rate": 0.00019961946980917456,
"loss": 4.0802,
"step": 5
},
{
"epoch": 0.00399400898652022,
"grad_norm": 4.862600803375244,
"learning_rate": 0.00019914448613738106,
"loss": 3.7522,
"step": 6
},
{
"epoch": 0.004659677150940256,
"grad_norm": 5.443639278411865,
"learning_rate": 0.00019848077530122083,
"loss": 2.7179,
"step": 7
},
{
"epoch": 0.005325345315360293,
"grad_norm": 3.63372540473938,
"learning_rate": 0.00019762960071199333,
"loss": 2.3048,
"step": 8
},
{
"epoch": 0.005991013479780329,
"grad_norm": 2.9024391174316406,
"learning_rate": 0.00019659258262890683,
"loss": 1.6602,
"step": 9
},
{
"epoch": 0.006656681644200366,
"grad_norm": 4.694620132446289,
"learning_rate": 0.0001953716950748227,
"loss": 2.0155,
"step": 10
},
{
"epoch": 0.007322349808620403,
"grad_norm": 4.4970855712890625,
"learning_rate": 0.00019396926207859084,
"loss": 1.8371,
"step": 11
},
{
"epoch": 0.00798801797304044,
"grad_norm": 3.8255815505981445,
"learning_rate": 0.0001923879532511287,
"loss": 1.7279,
"step": 12
},
{
"epoch": 0.008653686137460476,
"grad_norm": 3.8512322902679443,
"learning_rate": 0.000190630778703665,
"loss": 1.3884,
"step": 13
},
{
"epoch": 0.009319354301880512,
"grad_norm": 4.351423263549805,
"learning_rate": 0.00018870108331782217,
"loss": 1.5076,
"step": 14
},
{
"epoch": 0.00998502246630055,
"grad_norm": 4.972479820251465,
"learning_rate": 0.00018660254037844388,
"loss": 1.3592,
"step": 15
},
{
"epoch": 0.010650690630720586,
"grad_norm": 4.12640905380249,
"learning_rate": 0.0001843391445812886,
"loss": 1.2625,
"step": 16
},
{
"epoch": 0.011316358795140622,
"grad_norm": 4.288045883178711,
"learning_rate": 0.0001819152044288992,
"loss": 1.2336,
"step": 17
},
{
"epoch": 0.011982026959560658,
"grad_norm": 4.866492748260498,
"learning_rate": 0.00017933533402912354,
"loss": 1.4185,
"step": 18
},
{
"epoch": 0.012647695123980696,
"grad_norm": 3.893460512161255,
"learning_rate": 0.0001766044443118978,
"loss": 1.2172,
"step": 19
},
{
"epoch": 0.013313363288400732,
"grad_norm": 3.1255502700805664,
"learning_rate": 0.0001737277336810124,
"loss": 0.9529,
"step": 20
},
{
"epoch": 0.013979031452820768,
"grad_norm": 2.7771689891815186,
"learning_rate": 0.00017071067811865476,
"loss": 1.0,
"step": 21
},
{
"epoch": 0.014644699617240806,
"grad_norm": 2.2438104152679443,
"learning_rate": 0.00016755902076156604,
"loss": 0.7369,
"step": 22
},
{
"epoch": 0.015310367781660842,
"grad_norm": 3.5781610012054443,
"learning_rate": 0.00016427876096865394,
"loss": 1.108,
"step": 23
},
{
"epoch": 0.01597603594608088,
"grad_norm": 2.71801495552063,
"learning_rate": 0.00016087614290087208,
"loss": 0.7519,
"step": 24
},
{
"epoch": 0.016641704110500914,
"grad_norm": 2.9104671478271484,
"learning_rate": 0.0001573576436351046,
"loss": 0.6761,
"step": 25
},
{
"epoch": 0.016641704110500914,
"eval_loss": 0.7497785687446594,
"eval_runtime": 294.9753,
"eval_samples_per_second": 4.288,
"eval_steps_per_second": 2.146,
"step": 25
},
{
"epoch": 0.017307372274920952,
"grad_norm": 4.477328300476074,
"learning_rate": 0.0001537299608346824,
"loss": 0.8882,
"step": 26
},
{
"epoch": 0.01797304043934099,
"grad_norm": 2.8925857543945312,
"learning_rate": 0.00015000000000000001,
"loss": 0.6097,
"step": 27
},
{
"epoch": 0.018638708603761024,
"grad_norm": 2.060027837753296,
"learning_rate": 0.00014617486132350343,
"loss": 0.3883,
"step": 28
},
{
"epoch": 0.019304376768181062,
"grad_norm": 3.449633836746216,
"learning_rate": 0.00014226182617406996,
"loss": 0.5409,
"step": 29
},
{
"epoch": 0.0199700449326011,
"grad_norm": 5.332679748535156,
"learning_rate": 0.000138268343236509,
"loss": 0.8763,
"step": 30
},
{
"epoch": 0.020635713097021134,
"grad_norm": 3.696377754211426,
"learning_rate": 0.00013420201433256689,
"loss": 0.4409,
"step": 31
},
{
"epoch": 0.021301381261441172,
"grad_norm": 3.181502103805542,
"learning_rate": 0.00013007057995042732,
"loss": 0.4034,
"step": 32
},
{
"epoch": 0.02196704942586121,
"grad_norm": 3.712878704071045,
"learning_rate": 0.00012588190451025207,
"loss": 0.4278,
"step": 33
},
{
"epoch": 0.022632717590281244,
"grad_norm": 4.807991981506348,
"learning_rate": 0.00012164396139381029,
"loss": 0.5724,
"step": 34
},
{
"epoch": 0.023298385754701282,
"grad_norm": 4.910369873046875,
"learning_rate": 0.00011736481776669306,
"loss": 0.6912,
"step": 35
},
{
"epoch": 0.023964053919121316,
"grad_norm": 4.7990193367004395,
"learning_rate": 0.00011305261922200519,
"loss": 0.611,
"step": 36
},
{
"epoch": 0.024629722083541354,
"grad_norm": 5.814478397369385,
"learning_rate": 0.00010871557427476583,
"loss": 0.5512,
"step": 37
},
{
"epoch": 0.025295390247961392,
"grad_norm": 3.2857327461242676,
"learning_rate": 0.00010436193873653361,
"loss": 0.4976,
"step": 38
},
{
"epoch": 0.025961058412381426,
"grad_norm": 2.919463872909546,
"learning_rate": 0.0001,
"loss": 0.3717,
"step": 39
},
{
"epoch": 0.026626726576801464,
"grad_norm": 2.6156516075134277,
"learning_rate": 9.563806126346642e-05,
"loss": 0.3615,
"step": 40
},
{
"epoch": 0.027292394741221502,
"grad_norm": 2.868933916091919,
"learning_rate": 9.128442572523417e-05,
"loss": 0.3111,
"step": 41
},
{
"epoch": 0.027958062905641536,
"grad_norm": 2.936983823776245,
"learning_rate": 8.694738077799488e-05,
"loss": 0.2645,
"step": 42
},
{
"epoch": 0.028623731070061574,
"grad_norm": 1.8019835948944092,
"learning_rate": 8.263518223330697e-05,
"loss": 0.1452,
"step": 43
},
{
"epoch": 0.029289399234481612,
"grad_norm": 4.0728983879089355,
"learning_rate": 7.835603860618972e-05,
"loss": 0.3946,
"step": 44
},
{
"epoch": 0.029955067398901646,
"grad_norm": 1.7865502834320068,
"learning_rate": 7.411809548974792e-05,
"loss": 0.1265,
"step": 45
},
{
"epoch": 0.030620735563321684,
"grad_norm": 2.652951240539551,
"learning_rate": 6.992942004957271e-05,
"loss": 0.1366,
"step": 46
},
{
"epoch": 0.03128640372774172,
"grad_norm": 4.144972801208496,
"learning_rate": 6.579798566743314e-05,
"loss": 0.4658,
"step": 47
},
{
"epoch": 0.03195207189216176,
"grad_norm": 2.2921547889709473,
"learning_rate": 6.173165676349103e-05,
"loss": 0.1661,
"step": 48
},
{
"epoch": 0.032617740056581794,
"grad_norm": 2.8767738342285156,
"learning_rate": 5.773817382593008e-05,
"loss": 0.2198,
"step": 49
},
{
"epoch": 0.03328340822100183,
"grad_norm": 5.364581108093262,
"learning_rate": 5.382513867649663e-05,
"loss": 0.3999,
"step": 50
},
{
"epoch": 0.03328340822100183,
"eval_loss": 0.2637653350830078,
"eval_runtime": 294.8304,
"eval_samples_per_second": 4.291,
"eval_steps_per_second": 2.147,
"step": 50
},
{
"epoch": 0.03394907638542187,
"grad_norm": 2.3751840591430664,
"learning_rate": 5.000000000000002e-05,
"loss": 0.0587,
"step": 51
},
{
"epoch": 0.034614744549841904,
"grad_norm": 3.929506778717041,
"learning_rate": 4.6270039165317605e-05,
"loss": 0.243,
"step": 52
},
{
"epoch": 0.03528041271426194,
"grad_norm": 4.9729132652282715,
"learning_rate": 4.264235636489542e-05,
"loss": 0.3327,
"step": 53
},
{
"epoch": 0.03594608087868198,
"grad_norm": 4.64143180847168,
"learning_rate": 3.9123857099127936e-05,
"loss": 0.3707,
"step": 54
},
{
"epoch": 0.036611749043102014,
"grad_norm": 3.2200844287872314,
"learning_rate": 3.5721239031346066e-05,
"loss": 0.2981,
"step": 55
},
{
"epoch": 0.03727741720752205,
"grad_norm": 2.6393558979034424,
"learning_rate": 3.244097923843398e-05,
"loss": 0.1529,
"step": 56
},
{
"epoch": 0.03794308537194209,
"grad_norm": 4.790804862976074,
"learning_rate": 2.9289321881345254e-05,
"loss": 0.2654,
"step": 57
},
{
"epoch": 0.038608753536362124,
"grad_norm": 1.2065534591674805,
"learning_rate": 2.6272266318987603e-05,
"loss": 0.0551,
"step": 58
},
{
"epoch": 0.03927442170078216,
"grad_norm": 3.567166328430176,
"learning_rate": 2.339555568810221e-05,
"loss": 0.3295,
"step": 59
},
{
"epoch": 0.0399400898652022,
"grad_norm": 1.8716219663619995,
"learning_rate": 2.0664665970876496e-05,
"loss": 0.1449,
"step": 60
},
{
"epoch": 0.040605758029622234,
"grad_norm": 2.8182613849639893,
"learning_rate": 1.808479557110081e-05,
"loss": 0.1724,
"step": 61
},
{
"epoch": 0.04127142619404227,
"grad_norm": 4.6796112060546875,
"learning_rate": 1.566085541871145e-05,
"loss": 0.2599,
"step": 62
},
{
"epoch": 0.04193709435846231,
"grad_norm": 3.623056411743164,
"learning_rate": 1.339745962155613e-05,
"loss": 0.0858,
"step": 63
},
{
"epoch": 0.042602762522882344,
"grad_norm": 1.3320046663284302,
"learning_rate": 1.129891668217783e-05,
"loss": 0.0667,
"step": 64
},
{
"epoch": 0.04326843068730238,
"grad_norm": 2.166306495666504,
"learning_rate": 9.369221296335006e-06,
"loss": 0.1206,
"step": 65
},
{
"epoch": 0.04393409885172242,
"grad_norm": 2.2918238639831543,
"learning_rate": 7.612046748871327e-06,
"loss": 0.0988,
"step": 66
},
{
"epoch": 0.044599767016142454,
"grad_norm": 3.8302066326141357,
"learning_rate": 6.030737921409169e-06,
"loss": 0.3119,
"step": 67
},
{
"epoch": 0.04526543518056249,
"grad_norm": 3.98258638381958,
"learning_rate": 4.628304925177318e-06,
"loss": 0.2715,
"step": 68
},
{
"epoch": 0.04593110334498253,
"grad_norm": 4.596426010131836,
"learning_rate": 3.40741737109318e-06,
"loss": 0.5579,
"step": 69
},
{
"epoch": 0.046596771509402564,
"grad_norm": 2.9032671451568604,
"learning_rate": 2.3703992880066638e-06,
"loss": 0.1493,
"step": 70
},
{
"epoch": 0.0472624396738226,
"grad_norm": 7.22641134262085,
"learning_rate": 1.5192246987791981e-06,
"loss": 0.4988,
"step": 71
},
{
"epoch": 0.04792810783824263,
"grad_norm": 5.318079948425293,
"learning_rate": 8.555138626189618e-07,
"loss": 0.3443,
"step": 72
},
{
"epoch": 0.048593776002662674,
"grad_norm": 2.0902915000915527,
"learning_rate": 3.805301908254455e-07,
"loss": 0.1034,
"step": 73
},
{
"epoch": 0.04925944416708271,
"grad_norm": 3.0144219398498535,
"learning_rate": 9.517784181422019e-08,
"loss": 0.3192,
"step": 74
},
{
"epoch": 0.04992511233150274,
"grad_norm": 4.522768974304199,
"learning_rate": 0.0,
"loss": 0.0828,
"step": 75
},
{
"epoch": 0.04992511233150274,
"eval_loss": 0.2045978456735611,
"eval_runtime": 294.7269,
"eval_samples_per_second": 4.292,
"eval_steps_per_second": 2.148,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.9164117532672e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
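
This is the standard trainer_state.json written by the Hugging Face Trainer at a checkpoint save. Below is a minimal sketch, not part of the checkpoint itself, showing one way to pull the train/eval loss trajectory out of log_history; the local file path is an assumption.

```python
import json

# Load the checkpoint's trainer state (assumed local copy of this file).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-step entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step {state['global_step']} / {state['max_steps']} "
      f"(epoch {state['epoch']:.5f})")
print(f"first/last train loss: {train_logs[0]['loss']} -> {train_logs[-1]['loss']}")
for e in eval_logs:
    print(f"step {e['step']:>3}: eval_loss = {e['eval_loss']:.4f}")
```

For this checkpoint, the evaluation entries show eval_loss falling from about 5.41 at step 1 to about 0.20 at step 75.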