{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 4145,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.030156815440289506,
"grad_norm": null,
"learning_rate": 4.2e-05,
"loss": 4.6298,
"step": 25
},
{
"epoch": 0.06031363088057901,
"grad_norm": 40.83469772338867,
"learning_rate": 9.200000000000001e-05,
"loss": 0.6127,
"step": 50
},
{
"epoch": 0.09047044632086852,
"grad_norm": 79.89573669433594,
"learning_rate": 9.948717948717949e-05,
"loss": 0.7524,
"step": 75
},
{
"epoch": 0.12062726176115803,
"grad_norm": 446.1002502441406,
"learning_rate": 9.89010989010989e-05,
"loss": 1.0178,
"step": 100
},
{
"epoch": 0.15078407720144751,
"grad_norm": 128.81219482421875,
"learning_rate": 9.829059829059829e-05,
"loss": 1.834,
"step": 125
},
{
"epoch": 0.18094089264173704,
"grad_norm": 37.102203369140625,
"learning_rate": 9.768009768009768e-05,
"loss": 1.7381,
"step": 150
},
{
"epoch": 0.21109770808202655,
"grad_norm": 31.156801223754883,
"learning_rate": 9.706959706959707e-05,
"loss": 1.3946,
"step": 175
},
{
"epoch": 0.24125452352231605,
"grad_norm": 37.00471115112305,
"learning_rate": 9.645909645909647e-05,
"loss": 1.2038,
"step": 200
},
{
"epoch": 0.27141133896260555,
"grad_norm": 26.962158203125,
"learning_rate": 9.584859584859585e-05,
"loss": 1.1359,
"step": 225
},
{
"epoch": 0.30156815440289503,
"grad_norm": 32.251277923583984,
"learning_rate": 9.523809523809524e-05,
"loss": 1.112,
"step": 250
},
{
"epoch": 0.33172496984318456,
"grad_norm": 26.48785972595215,
"learning_rate": 9.462759462759463e-05,
"loss": 1.071,
"step": 275
},
{
"epoch": 0.3618817852834741,
"grad_norm": 18.70881462097168,
"learning_rate": 9.401709401709401e-05,
"loss": 1.0259,
"step": 300
},
{
"epoch": 0.39203860072376356,
"grad_norm": 21.082536697387695,
"learning_rate": 9.340659340659341e-05,
"loss": 1.0232,
"step": 325
},
{
"epoch": 0.4221954161640531,
"grad_norm": 18.735260009765625,
"learning_rate": 9.27960927960928e-05,
"loss": 1.0632,
"step": 350
},
{
"epoch": 0.45235223160434257,
"grad_norm": 22.869077682495117,
"learning_rate": 9.218559218559219e-05,
"loss": 1.0362,
"step": 375
},
{
"epoch": 0.4825090470446321,
"grad_norm": 19.3148250579834,
"learning_rate": 9.157509157509158e-05,
"loss": 0.9837,
"step": 400
},
{
"epoch": 0.5126658624849216,
"grad_norm": 15.154926300048828,
"learning_rate": 9.096459096459096e-05,
"loss": 0.957,
"step": 425
},
{
"epoch": 0.5428226779252111,
"grad_norm": 15.833111763000488,
"learning_rate": 9.035409035409036e-05,
"loss": 0.9336,
"step": 450
},
{
"epoch": 0.5729794933655006,
"grad_norm": 21.135068893432617,
"learning_rate": 8.974358974358975e-05,
"loss": 0.9431,
"step": 475
},
{
"epoch": 0.6031363088057901,
"grad_norm": 17.726829528808594,
"learning_rate": 8.913308913308915e-05,
"loss": 0.9775,
"step": 500
},
{
"epoch": 0.6332931242460796,
"grad_norm": 25.220857620239258,
"learning_rate": 8.852258852258852e-05,
"loss": 1.0455,
"step": 525
},
{
"epoch": 0.6634499396863691,
"grad_norm": 22.668001174926758,
"learning_rate": 8.791208791208791e-05,
"loss": 0.9617,
"step": 550
},
{
"epoch": 0.6936067551266586,
"grad_norm": 19.432435989379883,
"learning_rate": 8.730158730158731e-05,
"loss": 0.9314,
"step": 575
},
{
"epoch": 0.7237635705669482,
"grad_norm": 18.803312301635742,
"learning_rate": 8.66910866910867e-05,
"loss": 0.9559,
"step": 600
},
{
"epoch": 0.7539203860072377,
"grad_norm": 17.0799560546875,
"learning_rate": 8.608058608058608e-05,
"loss": 0.9768,
"step": 625
},
{
"epoch": 0.7840772014475271,
"grad_norm": 18.541135787963867,
"learning_rate": 8.547008547008547e-05,
"loss": 0.9384,
"step": 650
},
{
"epoch": 0.8142340168878166,
"grad_norm": 76.61760711669922,
"learning_rate": 8.485958485958486e-05,
"loss": 0.9088,
"step": 675
},
{
"epoch": 0.8443908323281062,
"grad_norm": 15.41170883178711,
"learning_rate": 8.424908424908426e-05,
"loss": 0.8698,
"step": 700
},
{
"epoch": 0.8745476477683957,
"grad_norm": 19.12152099609375,
"learning_rate": 8.363858363858364e-05,
"loss": 0.932,
"step": 725
},
{
"epoch": 0.9047044632086851,
"grad_norm": 13.084074974060059,
"learning_rate": 8.302808302808303e-05,
"loss": 0.8682,
"step": 750
},
{
"epoch": 0.9348612786489746,
"grad_norm": 17.12626838684082,
"learning_rate": 8.241758241758242e-05,
"loss": 0.879,
"step": 775
},
{
"epoch": 0.9650180940892642,
"grad_norm": 27.889331817626953,
"learning_rate": 8.18070818070818e-05,
"loss": 0.8815,
"step": 800
},
{
"epoch": 0.9951749095295537,
"grad_norm": 15.480960845947266,
"learning_rate": 8.11965811965812e-05,
"loss": 0.8855,
"step": 825
},
{
"epoch": 1.0253317249698433,
"grad_norm": 17.717817306518555,
"learning_rate": 8.058608058608059e-05,
"loss": 0.8537,
"step": 850
},
{
"epoch": 1.0554885404101326,
"grad_norm": 18.344432830810547,
"learning_rate": 7.997557997557998e-05,
"loss": 0.9154,
"step": 875
},
{
"epoch": 1.0856453558504222,
"grad_norm": 17.965234756469727,
"learning_rate": 7.936507936507937e-05,
"loss": 0.91,
"step": 900
},
{
"epoch": 1.1158021712907118,
"grad_norm": 22.685340881347656,
"learning_rate": 7.875457875457875e-05,
"loss": 0.8627,
"step": 925
},
{
"epoch": 1.1459589867310012,
"grad_norm": 36.94546127319336,
"learning_rate": 7.814407814407815e-05,
"loss": 0.84,
"step": 950
},
{
"epoch": 1.1761158021712907,
"grad_norm": 98.67118835449219,
"learning_rate": 7.753357753357754e-05,
"loss": 0.8758,
"step": 975
},
{
"epoch": 1.2062726176115803,
"grad_norm": 22.5080509185791,
"learning_rate": 7.694749694749695e-05,
"loss": 0.9677,
"step": 1000
},
{
"epoch": 1.2364294330518697,
"grad_norm": 20.48828125,
"learning_rate": 7.633699633699634e-05,
"loss": 1.2519,
"step": 1025
},
{
"epoch": 1.2665862484921593,
"grad_norm": 16.624662399291992,
"learning_rate": 7.572649572649573e-05,
"loss": 0.8674,
"step": 1050
},
{
"epoch": 1.2967430639324489,
"grad_norm": 17.774490356445312,
"learning_rate": 7.511599511599511e-05,
"loss": 0.8505,
"step": 1075
},
{
"epoch": 1.3268998793727382,
"grad_norm": 12.95785903930664,
"learning_rate": 7.450549450549451e-05,
"loss": 0.8215,
"step": 1100
},
{
"epoch": 1.3570566948130278,
"grad_norm": 14.339399337768555,
"learning_rate": 7.38949938949939e-05,
"loss": 0.8527,
"step": 1125
},
{
"epoch": 1.3872135102533172,
"grad_norm": 13.43790340423584,
"learning_rate": 7.328449328449329e-05,
"loss": 0.7856,
"step": 1150
},
{
"epoch": 1.4173703256936068,
"grad_norm": 15.764432907104492,
"learning_rate": 7.267399267399268e-05,
"loss": 0.8953,
"step": 1175
},
{
"epoch": 1.4475271411338961,
"grad_norm": 17.939727783203125,
"learning_rate": 7.206349206349206e-05,
"loss": 0.7962,
"step": 1200
},
{
"epoch": 1.4776839565741857,
"grad_norm": 22.913164138793945,
"learning_rate": 7.145299145299146e-05,
"loss": 0.8549,
"step": 1225
},
{
"epoch": 1.5078407720144753,
"grad_norm": 36.68199157714844,
"learning_rate": 7.084249084249085e-05,
"loss": 0.7912,
"step": 1250
},
{
"epoch": 1.5379975874547647,
"grad_norm": 14.023738861083984,
"learning_rate": 7.025641025641025e-05,
"loss": 0.9224,
"step": 1275
},
{
"epoch": 1.5681544028950543,
"grad_norm": 14.844511985778809,
"learning_rate": 6.964590964590965e-05,
"loss": 0.8707,
"step": 1300
},
{
"epoch": 1.5983112183353438,
"grad_norm": 67.9472427368164,
"learning_rate": 6.903540903540904e-05,
"loss": 0.8692,
"step": 1325
},
{
"epoch": 1.6284680337756332,
"grad_norm": 13.173006057739258,
"learning_rate": 6.842490842490842e-05,
"loss": 0.8179,
"step": 1350
},
{
"epoch": 1.6586248492159228,
"grad_norm": 18.236101150512695,
"learning_rate": 6.781440781440782e-05,
"loss": 0.8281,
"step": 1375
},
{
"epoch": 1.6887816646562124,
"grad_norm": 20.294797897338867,
"learning_rate": 6.720390720390721e-05,
"loss": 0.8273,
"step": 1400
},
{
"epoch": 1.7189384800965017,
"grad_norm": 18.06429672241211,
"learning_rate": 6.65934065934066e-05,
"loss": 0.7984,
"step": 1425
},
{
"epoch": 1.7490952955367913,
"grad_norm": 159.88597106933594,
"learning_rate": 6.598290598290599e-05,
"loss": 0.8045,
"step": 1450
},
{
"epoch": 1.779252110977081,
"grad_norm": 32.71599197387695,
"learning_rate": 6.537240537240537e-05,
"loss": 0.8615,
"step": 1475
},
{
"epoch": 1.8094089264173703,
"grad_norm": 20.00482177734375,
"learning_rate": 6.476190476190477e-05,
"loss": 0.8346,
"step": 1500
},
{
"epoch": 1.8395657418576599,
"grad_norm": 29.308307647705078,
"learning_rate": 6.415140415140416e-05,
"loss": 0.7786,
"step": 1525
},
{
"epoch": 1.8697225572979495,
"grad_norm": 815.1858520507812,
"learning_rate": 6.354090354090355e-05,
"loss": 0.8377,
"step": 1550
},
{
"epoch": 1.8998793727382388,
"grad_norm": 17.91884994506836,
"learning_rate": 6.293040293040293e-05,
"loss": 0.7578,
"step": 1575
},
{
"epoch": 1.9300361881785284,
"grad_norm": 53.48311996459961,
"learning_rate": 6.231990231990232e-05,
"loss": 0.8059,
"step": 1600
},
{
"epoch": 1.960193003618818,
"grad_norm": 16.006927490234375,
"learning_rate": 6.170940170940172e-05,
"loss": 0.8219,
"step": 1625
},
{
"epoch": 1.9903498190591074,
"grad_norm": 26.18201446533203,
"learning_rate": 6.10989010989011e-05,
"loss": 0.8062,
"step": 1650
},
{
"epoch": 2.0205066344993967,
"grad_norm": 41.472023010253906,
"learning_rate": 6.048840048840049e-05,
"loss": 0.782,
"step": 1675
},
{
"epoch": 2.0506634499396865,
"grad_norm": 17.68017578125,
"learning_rate": 5.987789987789988e-05,
"loss": 0.787,
"step": 1700
},
{
"epoch": 2.080820265379976,
"grad_norm": 16.18324089050293,
"learning_rate": 5.9267399267399274e-05,
"loss": 0.7642,
"step": 1725
},
{
"epoch": 2.1109770808202653,
"grad_norm": 15.70964527130127,
"learning_rate": 5.865689865689866e-05,
"loss": 0.793,
"step": 1750
},
{
"epoch": 2.141133896260555,
"grad_norm": 49.760772705078125,
"learning_rate": 5.8046398046398054e-05,
"loss": 0.8482,
"step": 1775
},
{
"epoch": 2.1712907117008444,
"grad_norm": 15.465973854064941,
"learning_rate": 5.7435897435897434e-05,
"loss": 0.824,
"step": 1800
},
{
"epoch": 2.201447527141134,
"grad_norm": 27.056657791137695,
"learning_rate": 5.682539682539683e-05,
"loss": 0.7957,
"step": 1825
},
{
"epoch": 2.2316043425814236,
"grad_norm": 14.316446304321289,
"learning_rate": 5.6214896214896215e-05,
"loss": 0.807,
"step": 1850
},
{
"epoch": 2.261761158021713,
"grad_norm": 13.680840492248535,
"learning_rate": 5.560439560439561e-05,
"loss": 0.7543,
"step": 1875
},
{
"epoch": 2.2919179734620023,
"grad_norm": 16.61717987060547,
"learning_rate": 5.4993894993895e-05,
"loss": 0.7723,
"step": 1900
},
{
"epoch": 2.3220747889022917,
"grad_norm": 21.093223571777344,
"learning_rate": 5.438339438339438e-05,
"loss": 0.7988,
"step": 1925
},
{
"epoch": 2.3522316043425815,
"grad_norm": 17.148927688598633,
"learning_rate": 5.3772893772893775e-05,
"loss": 0.7661,
"step": 1950
},
{
"epoch": 2.382388419782871,
"grad_norm": 15.15334415435791,
"learning_rate": 5.316239316239316e-05,
"loss": 0.8032,
"step": 1975
},
{
"epoch": 2.4125452352231607,
"grad_norm": 15.025490760803223,
"learning_rate": 5.2551892551892556e-05,
"loss": 0.7666,
"step": 2000
},
{
"epoch": 2.44270205066345,
"grad_norm": 39.71788787841797,
"learning_rate": 5.194139194139195e-05,
"loss": 0.7697,
"step": 2025
},
{
"epoch": 2.4728588661037394,
"grad_norm": 35.849727630615234,
"learning_rate": 5.133089133089133e-05,
"loss": 0.7531,
"step": 2050
},
{
"epoch": 2.5030156815440288,
"grad_norm": 34.135833740234375,
"learning_rate": 5.072039072039072e-05,
"loss": 0.7767,
"step": 2075
},
{
"epoch": 2.5331724969843186,
"grad_norm": 13.79836368560791,
"learning_rate": 5.010989010989011e-05,
"loss": 0.7961,
"step": 2100
},
{
"epoch": 2.563329312424608,
"grad_norm": 27.408437728881836,
"learning_rate": 4.94993894993895e-05,
"loss": 0.7926,
"step": 2125
},
{
"epoch": 2.5934861278648977,
"grad_norm": 11.799072265625,
"learning_rate": 4.888888888888889e-05,
"loss": 0.7315,
"step": 2150
},
{
"epoch": 2.623642943305187,
"grad_norm": 22.04068374633789,
"learning_rate": 4.8278388278388283e-05,
"loss": 0.749,
"step": 2175
},
{
"epoch": 2.6537997587454765,
"grad_norm": 18.57539939880371,
"learning_rate": 4.766788766788767e-05,
"loss": 0.7661,
"step": 2200
},
{
"epoch": 2.683956574185766,
"grad_norm": 14.180893898010254,
"learning_rate": 4.705738705738706e-05,
"loss": 0.7402,
"step": 2225
},
{
"epoch": 2.7141133896260556,
"grad_norm": 15.663522720336914,
"learning_rate": 4.644688644688645e-05,
"loss": 0.7778,
"step": 2250
},
{
"epoch": 2.744270205066345,
"grad_norm": 14.202746391296387,
"learning_rate": 4.583638583638584e-05,
"loss": 0.7922,
"step": 2275
},
{
"epoch": 2.7744270205066344,
"grad_norm": 19.26568031311035,
"learning_rate": 4.522588522588523e-05,
"loss": 0.7454,
"step": 2300
},
{
"epoch": 2.804583835946924,
"grad_norm": 30.88574981689453,
"learning_rate": 4.461538461538462e-05,
"loss": 0.7383,
"step": 2325
},
{
"epoch": 2.8347406513872135,
"grad_norm": 17.263395309448242,
"learning_rate": 4.4004884004884005e-05,
"loss": 0.7409,
"step": 2350
},
{
"epoch": 2.864897466827503,
"grad_norm": 15.320577621459961,
"learning_rate": 4.33943833943834e-05,
"loss": 0.7946,
"step": 2375
},
{
"epoch": 2.8950542822677923,
"grad_norm": 11.715496063232422,
"learning_rate": 4.2783882783882785e-05,
"loss": 0.7125,
"step": 2400
},
{
"epoch": 2.925211097708082,
"grad_norm": 14.207977294921875,
"learning_rate": 4.217338217338218e-05,
"loss": 0.7403,
"step": 2425
},
{
"epoch": 2.9553679131483714,
"grad_norm": 16.815935134887695,
"learning_rate": 4.1562881562881565e-05,
"loss": 0.754,
"step": 2450
},
{
"epoch": 2.9855247285886612,
"grad_norm": 21.52472496032715,
"learning_rate": 4.095238095238095e-05,
"loss": 0.7502,
"step": 2475
},
{
"epoch": 3.0156815440289506,
"grad_norm": 12.597681999206543,
"learning_rate": 4.0341880341880346e-05,
"loss": 0.7553,
"step": 2500
},
{
"epoch": 3.04583835946924,
"grad_norm": 15.156243324279785,
"learning_rate": 3.973137973137973e-05,
"loss": 0.6723,
"step": 2525
},
{
"epoch": 3.0759951749095293,
"grad_norm": 19.262258529663086,
"learning_rate": 3.912087912087912e-05,
"loss": 0.7203,
"step": 2550
},
{
"epoch": 3.106151990349819,
"grad_norm": 12.042356491088867,
"learning_rate": 3.851037851037851e-05,
"loss": 0.7715,
"step": 2575
},
{
"epoch": 3.1363088057901085,
"grad_norm": 15.979390144348145,
"learning_rate": 3.78998778998779e-05,
"loss": 0.7234,
"step": 2600
},
{
"epoch": 3.166465621230398,
"grad_norm": 27.65557861328125,
"learning_rate": 3.728937728937729e-05,
"loss": 0.6915,
"step": 2625
},
{
"epoch": 3.1966224366706877,
"grad_norm": 28.780155181884766,
"learning_rate": 3.667887667887668e-05,
"loss": 0.6991,
"step": 2650
},
{
"epoch": 3.226779252110977,
"grad_norm": 23.69841957092285,
"learning_rate": 3.606837606837607e-05,
"loss": 0.7365,
"step": 2675
},
{
"epoch": 3.2569360675512664,
"grad_norm": 26.912643432617188,
"learning_rate": 3.545787545787546e-05,
"loss": 0.7455,
"step": 2700
},
{
"epoch": 3.287092882991556,
"grad_norm": 25.788583755493164,
"learning_rate": 3.484737484737485e-05,
"loss": 0.7622,
"step": 2725
},
{
"epoch": 3.3172496984318456,
"grad_norm": 36.25239181518555,
"learning_rate": 3.423687423687424e-05,
"loss": 0.7134,
"step": 2750
},
{
"epoch": 3.347406513872135,
"grad_norm": 25.903047561645508,
"learning_rate": 3.362637362637363e-05,
"loss": 0.7219,
"step": 2775
},
{
"epoch": 3.3775633293124248,
"grad_norm": 53.384639739990234,
"learning_rate": 3.3015873015873014e-05,
"loss": 0.7554,
"step": 2800
},
{
"epoch": 3.407720144752714,
"grad_norm": 14.4858980178833,
"learning_rate": 3.240537240537241e-05,
"loss": 0.7368,
"step": 2825
},
{
"epoch": 3.4378769601930035,
"grad_norm": 16.597522735595703,
"learning_rate": 3.1794871794871795e-05,
"loss": 0.7357,
"step": 2850
},
{
"epoch": 3.4680337756332933,
"grad_norm": 12.060571670532227,
"learning_rate": 3.118437118437119e-05,
"loss": 0.6797,
"step": 2875
},
{
"epoch": 3.4981905910735827,
"grad_norm": 14.644749641418457,
"learning_rate": 3.0573870573870575e-05,
"loss": 0.7417,
"step": 2900
},
{
"epoch": 3.528347406513872,
"grad_norm": 12.184840202331543,
"learning_rate": 2.9963369963369965e-05,
"loss": 0.7043,
"step": 2925
},
{
"epoch": 3.558504221954162,
"grad_norm": 14.599753379821777,
"learning_rate": 2.9352869352869355e-05,
"loss": 0.7172,
"step": 2950
},
{
"epoch": 3.588661037394451,
"grad_norm": 29.58240509033203,
"learning_rate": 2.8742368742368742e-05,
"loss": 0.7016,
"step": 2975
},
{
"epoch": 3.6188178528347406,
"grad_norm": 12.626923561096191,
"learning_rate": 2.8131868131868132e-05,
"loss": 0.719,
"step": 3000
},
{
"epoch": 3.64897466827503,
"grad_norm": 17.7034969329834,
"learning_rate": 2.7521367521367526e-05,
"loss": 0.6679,
"step": 3025
},
{
"epoch": 3.6791314837153197,
"grad_norm": 16.806182861328125,
"learning_rate": 2.6910866910866913e-05,
"loss": 0.6795,
"step": 3050
},
{
"epoch": 3.709288299155609,
"grad_norm": 11.212449073791504,
"learning_rate": 2.6300366300366303e-05,
"loss": 0.6742,
"step": 3075
},
{
"epoch": 3.739445114595899,
"grad_norm": 16.0538272857666,
"learning_rate": 2.568986568986569e-05,
"loss": 0.6756,
"step": 3100
},
{
"epoch": 3.7696019300361883,
"grad_norm": 18.737091064453125,
"learning_rate": 2.507936507936508e-05,
"loss": 0.7179,
"step": 3125
},
{
"epoch": 3.7997587454764776,
"grad_norm": 13.685617446899414,
"learning_rate": 2.446886446886447e-05,
"loss": 0.667,
"step": 3150
},
{
"epoch": 3.829915560916767,
"grad_norm": 16.607234954833984,
"learning_rate": 2.385836385836386e-05,
"loss": 0.6868,
"step": 3175
},
{
"epoch": 3.860072376357057,
"grad_norm": 16.519487380981445,
"learning_rate": 2.324786324786325e-05,
"loss": 0.6811,
"step": 3200
},
{
"epoch": 3.890229191797346,
"grad_norm": 20.921655654907227,
"learning_rate": 2.2637362637362637e-05,
"loss": 0.6893,
"step": 3225
},
{
"epoch": 3.920386007237636,
"grad_norm": 13.694364547729492,
"learning_rate": 2.2026862026862027e-05,
"loss": 0.6833,
"step": 3250
},
{
"epoch": 3.9505428226779253,
"grad_norm": 15.668880462646484,
"learning_rate": 2.1416361416361417e-05,
"loss": 0.6897,
"step": 3275
},
{
"epoch": 3.9806996381182147,
"grad_norm": 9.373758316040039,
"learning_rate": 2.0805860805860808e-05,
"loss": 0.6629,
"step": 3300
},
{
"epoch": 4.010856453558504,
"grad_norm": 17.02540397644043,
"learning_rate": 2.0195360195360198e-05,
"loss": 0.6492,
"step": 3325
},
{
"epoch": 4.041013268998793,
"grad_norm": 24.518537521362305,
"learning_rate": 1.9584859584859585e-05,
"loss": 0.6732,
"step": 3350
},
{
"epoch": 4.071170084439084,
"grad_norm": 15.94694995880127,
"learning_rate": 1.8974358974358975e-05,
"loss": 0.6687,
"step": 3375
},
{
"epoch": 4.101326899879373,
"grad_norm": 12.533055305480957,
"learning_rate": 1.8363858363858365e-05,
"loss": 0.6745,
"step": 3400
},
{
"epoch": 4.131483715319662,
"grad_norm": 17.95030403137207,
"learning_rate": 1.7753357753357755e-05,
"loss": 0.6614,
"step": 3425
},
{
"epoch": 4.161640530759952,
"grad_norm": 16.25010108947754,
"learning_rate": 1.7142857142857145e-05,
"loss": 0.6743,
"step": 3450
},
{
"epoch": 4.191797346200241,
"grad_norm": 16.024572372436523,
"learning_rate": 1.6532356532356532e-05,
"loss": 0.6436,
"step": 3475
},
{
"epoch": 4.2219541616405305,
"grad_norm": 22.803369522094727,
"learning_rate": 1.5921855921855922e-05,
"loss": 0.6421,
"step": 3500
},
{
"epoch": 4.25211097708082,
"grad_norm": 14.937376022338867,
"learning_rate": 1.5311355311355312e-05,
"loss": 0.6565,
"step": 3525
},
{
"epoch": 4.28226779252111,
"grad_norm": 14.354950904846191,
"learning_rate": 1.4700854700854703e-05,
"loss": 0.6647,
"step": 3550
},
{
"epoch": 4.3124246079613995,
"grad_norm": 17.91471290588379,
"learning_rate": 1.4090354090354091e-05,
"loss": 0.6313,
"step": 3575
},
{
"epoch": 4.342581423401689,
"grad_norm": 33.2518424987793,
"learning_rate": 1.347985347985348e-05,
"loss": 0.6264,
"step": 3600
},
{
"epoch": 4.372738238841978,
"grad_norm": 13.307084083557129,
"learning_rate": 1.2869352869352868e-05,
"loss": 0.6563,
"step": 3625
},
{
"epoch": 4.402895054282268,
"grad_norm": 15.540495872497559,
"learning_rate": 1.2258852258852258e-05,
"loss": 0.6654,
"step": 3650
},
{
"epoch": 4.433051869722557,
"grad_norm": 14.595681190490723,
"learning_rate": 1.1648351648351648e-05,
"loss": 0.6633,
"step": 3675
},
{
"epoch": 4.463208685162847,
"grad_norm": 19.460350036621094,
"learning_rate": 1.1037851037851039e-05,
"loss": 0.6612,
"step": 3700
},
{
"epoch": 4.4933655006031366,
"grad_norm": 13.955097198486328,
"learning_rate": 1.0427350427350429e-05,
"loss": 0.6768,
"step": 3725
},
{
"epoch": 4.523522316043426,
"grad_norm": 13.924335479736328,
"learning_rate": 9.816849816849817e-06,
"loss": 0.6451,
"step": 3750
},
{
"epoch": 4.553679131483715,
"grad_norm": 12.562804222106934,
"learning_rate": 9.206349206349207e-06,
"loss": 0.6408,
"step": 3775
},
{
"epoch": 4.583835946924005,
"grad_norm": 11.879654884338379,
"learning_rate": 8.595848595848596e-06,
"loss": 0.6746,
"step": 3800
},
{
"epoch": 4.613992762364294,
"grad_norm": 12.780753135681152,
"learning_rate": 7.985347985347984e-06,
"loss": 0.6322,
"step": 3825
},
{
"epoch": 4.644149577804583,
"grad_norm": 13.602087020874023,
"learning_rate": 7.374847374847375e-06,
"loss": 0.6343,
"step": 3850
},
{
"epoch": 4.674306393244874,
"grad_norm": 16.94817352294922,
"learning_rate": 6.764346764346764e-06,
"loss": 0.6351,
"step": 3875
},
{
"epoch": 4.704463208685163,
"grad_norm": 10.593673706054688,
"learning_rate": 6.153846153846155e-06,
"loss": 0.5892,
"step": 3900
},
{
"epoch": 4.734620024125452,
"grad_norm": 21.8657283782959,
"learning_rate": 5.543345543345543e-06,
"loss": 0.661,
"step": 3925
},
{
"epoch": 4.764776839565742,
"grad_norm": 14.38430404663086,
"learning_rate": 4.932844932844933e-06,
"loss": 0.6261,
"step": 3950
},
{
"epoch": 4.794933655006031,
"grad_norm": 9.656220436096191,
"learning_rate": 4.322344322344323e-06,
"loss": 0.6405,
"step": 3975
},
{
"epoch": 4.825090470446321,
"grad_norm": 13.676799774169922,
"learning_rate": 3.711843711843712e-06,
"loss": 0.6169,
"step": 4000
},
{
"epoch": 4.855247285886611,
"grad_norm": 11.05395221710205,
"learning_rate": 3.1013431013431015e-06,
"loss": 0.6447,
"step": 4025
},
{
"epoch": 4.8854041013269,
"grad_norm": 13.141251564025879,
"learning_rate": 2.4908424908424913e-06,
"loss": 0.6244,
"step": 4050
},
{
"epoch": 4.915560916767189,
"grad_norm": 13.8480863571167,
"learning_rate": 1.8803418803418804e-06,
"loss": 0.638,
"step": 4075
},
{
"epoch": 4.945717732207479,
"grad_norm": 12.424724578857422,
"learning_rate": 1.26984126984127e-06,
"loss": 0.6151,
"step": 4100
},
{
"epoch": 4.975874547647768,
"grad_norm": 14.598273277282715,
"learning_rate": 6.593406593406594e-07,
"loss": 0.6537,
"step": 4125
}
],
"logging_steps": 25,
"max_steps": 4145,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.058118140105759e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}