colqwen2-ba64 / checkpoint-1847 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 1847,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005414185165132647,
"grad_norm": 4.75,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.8236,
"step": 10
},
{
"epoch": 0.010828370330265295,
"grad_norm": 4.59375,
"learning_rate": 4.000000000000001e-06,
"loss": 0.8484,
"step": 20
},
{
"epoch": 0.016242555495397944,
"grad_norm": 5.21875,
"learning_rate": 6e-06,
"loss": 0.8014,
"step": 30
},
{
"epoch": 0.02165674066053059,
"grad_norm": 3.984375,
"learning_rate": 8.000000000000001e-06,
"loss": 0.779,
"step": 40
},
{
"epoch": 0.02707092582566324,
"grad_norm": 2.875,
"learning_rate": 1e-05,
"loss": 0.7153,
"step": 50
},
{
"epoch": 0.03248511099079589,
"grad_norm": 2.328125,
"learning_rate": 1.2e-05,
"loss": 0.6692,
"step": 60
},
{
"epoch": 0.03789929615592853,
"grad_norm": 2.359375,
"learning_rate": 1.4000000000000001e-05,
"loss": 0.672,
"step": 70
},
{
"epoch": 0.04331348132106118,
"grad_norm": 1.9765625,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.6071,
"step": 80
},
{
"epoch": 0.04872766648619383,
"grad_norm": 1.8046875,
"learning_rate": 1.8e-05,
"loss": 0.5757,
"step": 90
},
{
"epoch": 0.05414185165132648,
"grad_norm": 1.6640625,
"learning_rate": 2e-05,
"loss": 0.5318,
"step": 100
},
{
"epoch": 0.05414185165132648,
"eval_loss": 0.4426738917827606,
"eval_runtime": 58.7167,
"eval_samples_per_second": 8.515,
"eval_steps_per_second": 0.545,
"step": 100
},
{
"epoch": 0.05955603681645912,
"grad_norm": 3.078125,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.5026,
"step": 110
},
{
"epoch": 0.06497022198159177,
"grad_norm": 3.234375,
"learning_rate": 2.4e-05,
"loss": 0.4925,
"step": 120
},
{
"epoch": 0.07038440714672442,
"grad_norm": 1.4296875,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.4557,
"step": 130
},
{
"epoch": 0.07579859231185707,
"grad_norm": 1.2734375,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.4558,
"step": 140
},
{
"epoch": 0.08121277747698971,
"grad_norm": 1.234375,
"learning_rate": 3e-05,
"loss": 0.426,
"step": 150
},
{
"epoch": 0.08662696264212236,
"grad_norm": 1.203125,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.3799,
"step": 160
},
{
"epoch": 0.092041147807255,
"grad_norm": 1.3125,
"learning_rate": 3.4000000000000007e-05,
"loss": 0.3866,
"step": 170
},
{
"epoch": 0.09745533297238766,
"grad_norm": 1.078125,
"learning_rate": 3.6e-05,
"loss": 0.3668,
"step": 180
},
{
"epoch": 0.10286951813752031,
"grad_norm": 1.3359375,
"learning_rate": 3.8e-05,
"loss": 0.3396,
"step": 190
},
{
"epoch": 0.10828370330265295,
"grad_norm": 1.1015625,
"learning_rate": 4e-05,
"loss": 0.3133,
"step": 200
},
{
"epoch": 0.10828370330265295,
"eval_loss": 0.23219549655914307,
"eval_runtime": 60.3361,
"eval_samples_per_second": 8.287,
"eval_steps_per_second": 0.53,
"step": 200
},
{
"epoch": 0.1136978884677856,
"grad_norm": 1.0390625,
"learning_rate": 4.2e-05,
"loss": 0.2862,
"step": 210
},
{
"epoch": 0.11911207363291824,
"grad_norm": 1.1484375,
"learning_rate": 4.4000000000000006e-05,
"loss": 0.2726,
"step": 220
},
{
"epoch": 0.12452625879805089,
"grad_norm": 0.9765625,
"learning_rate": 4.600000000000001e-05,
"loss": 0.2823,
"step": 230
},
{
"epoch": 0.12994044396318355,
"grad_norm": 1.15625,
"learning_rate": 4.8e-05,
"loss": 0.2724,
"step": 240
},
{
"epoch": 0.1353546291283162,
"grad_norm": 1.359375,
"learning_rate": 5e-05,
"loss": 0.2563,
"step": 250
},
{
"epoch": 0.14076881429344884,
"grad_norm": 1.359375,
"learning_rate": 4.9686912961803384e-05,
"loss": 0.2243,
"step": 260
},
{
"epoch": 0.1461829994585815,
"grad_norm": 1.734375,
"learning_rate": 4.9373825923606765e-05,
"loss": 0.2297,
"step": 270
},
{
"epoch": 0.15159718462371413,
"grad_norm": 2.078125,
"learning_rate": 4.906073888541015e-05,
"loss": 0.207,
"step": 280
},
{
"epoch": 0.15701136978884678,
"grad_norm": 2.34375,
"learning_rate": 4.874765184721353e-05,
"loss": 0.2255,
"step": 290
},
{
"epoch": 0.16242555495397942,
"grad_norm": 1.1953125,
"learning_rate": 4.843456480901691e-05,
"loss": 0.1946,
"step": 300
},
{
"epoch": 0.16242555495397942,
"eval_loss": 0.1316749006509781,
"eval_runtime": 58.9058,
"eval_samples_per_second": 8.488,
"eval_steps_per_second": 0.543,
"step": 300
},
{
"epoch": 0.16783974011911207,
"grad_norm": 1.8671875,
"learning_rate": 4.812147777082029e-05,
"loss": 0.2035,
"step": 310
},
{
"epoch": 0.17325392528424471,
"grad_norm": 1.40625,
"learning_rate": 4.780839073262367e-05,
"loss": 0.1852,
"step": 320
},
{
"epoch": 0.17866811044937736,
"grad_norm": 1.375,
"learning_rate": 4.7495303694427054e-05,
"loss": 0.2054,
"step": 330
},
{
"epoch": 0.18408229561451,
"grad_norm": 2.046875,
"learning_rate": 4.7182216656230435e-05,
"loss": 0.1803,
"step": 340
},
{
"epoch": 0.18949648077964265,
"grad_norm": 1.6875,
"learning_rate": 4.6869129618033816e-05,
"loss": 0.1993,
"step": 350
},
{
"epoch": 0.19491066594477532,
"grad_norm": 1.3671875,
"learning_rate": 4.65560425798372e-05,
"loss": 0.1954,
"step": 360
},
{
"epoch": 0.20032485110990797,
"grad_norm": 1.03125,
"learning_rate": 4.624295554164057e-05,
"loss": 0.1802,
"step": 370
},
{
"epoch": 0.20573903627504062,
"grad_norm": 0.87890625,
"learning_rate": 4.5929868503443954e-05,
"loss": 0.1941,
"step": 380
},
{
"epoch": 0.21115322144017326,
"grad_norm": 1.234375,
"learning_rate": 4.561678146524734e-05,
"loss": 0.1706,
"step": 390
},
{
"epoch": 0.2165674066053059,
"grad_norm": 0.9765625,
"learning_rate": 4.5303694427050724e-05,
"loss": 0.1845,
"step": 400
},
{
"epoch": 0.2165674066053059,
"eval_loss": 0.11701580137014389,
"eval_runtime": 58.716,
"eval_samples_per_second": 8.516,
"eval_steps_per_second": 0.545,
"step": 400
},
{
"epoch": 0.22198159177043855,
"grad_norm": 1.5078125,
"learning_rate": 4.4990607388854105e-05,
"loss": 0.1386,
"step": 410
},
{
"epoch": 0.2273957769355712,
"grad_norm": 1.015625,
"learning_rate": 4.4677520350657486e-05,
"loss": 0.1599,
"step": 420
},
{
"epoch": 0.23280996210070384,
"grad_norm": 1.84375,
"learning_rate": 4.436443331246087e-05,
"loss": 0.1856,
"step": 430
},
{
"epoch": 0.2382241472658365,
"grad_norm": 0.8203125,
"learning_rate": 4.405134627426425e-05,
"loss": 0.1766,
"step": 440
},
{
"epoch": 0.24363833243096913,
"grad_norm": 0.88671875,
"learning_rate": 4.373825923606763e-05,
"loss": 0.1448,
"step": 450
},
{
"epoch": 0.24905251759610178,
"grad_norm": 1.4453125,
"learning_rate": 4.342517219787101e-05,
"loss": 0.1838,
"step": 460
},
{
"epoch": 0.25446670276123445,
"grad_norm": 0.87109375,
"learning_rate": 4.3112085159674393e-05,
"loss": 0.1698,
"step": 470
},
{
"epoch": 0.2598808879263671,
"grad_norm": 1.6328125,
"learning_rate": 4.2798998121477775e-05,
"loss": 0.1392,
"step": 480
},
{
"epoch": 0.26529507309149974,
"grad_norm": 1.1171875,
"learning_rate": 4.2485911083281156e-05,
"loss": 0.1769,
"step": 490
},
{
"epoch": 0.2707092582566324,
"grad_norm": 1.25,
"learning_rate": 4.217282404508454e-05,
"loss": 0.1506,
"step": 500
},
{
"epoch": 0.2707092582566324,
"eval_loss": 0.11454860121011734,
"eval_runtime": 58.3011,
"eval_samples_per_second": 8.576,
"eval_steps_per_second": 0.549,
"step": 500
},
{
"epoch": 0.27612344342176504,
"grad_norm": 0.6953125,
"learning_rate": 4.185973700688792e-05,
"loss": 0.1297,
"step": 510
},
{
"epoch": 0.2815376285868977,
"grad_norm": 1.3515625,
"learning_rate": 4.15466499686913e-05,
"loss": 0.1459,
"step": 520
},
{
"epoch": 0.2869518137520303,
"grad_norm": 1.3515625,
"learning_rate": 4.123356293049468e-05,
"loss": 0.1565,
"step": 530
},
{
"epoch": 0.292365998917163,
"grad_norm": 1.078125,
"learning_rate": 4.092047589229806e-05,
"loss": 0.1444,
"step": 540
},
{
"epoch": 0.2977801840822956,
"grad_norm": 1.1484375,
"learning_rate": 4.0607388854101445e-05,
"loss": 0.1581,
"step": 550
},
{
"epoch": 0.30319436924742826,
"grad_norm": 1.1875,
"learning_rate": 4.029430181590482e-05,
"loss": 0.1781,
"step": 560
},
{
"epoch": 0.3086085544125609,
"grad_norm": 0.8359375,
"learning_rate": 3.99812147777082e-05,
"loss": 0.1333,
"step": 570
},
{
"epoch": 0.31402273957769355,
"grad_norm": 0.82421875,
"learning_rate": 3.966812773951158e-05,
"loss": 0.1254,
"step": 580
},
{
"epoch": 0.3194369247428262,
"grad_norm": 1.625,
"learning_rate": 3.9355040701314964e-05,
"loss": 0.2092,
"step": 590
},
{
"epoch": 0.32485110990795885,
"grad_norm": 1.5234375,
"learning_rate": 3.9041953663118345e-05,
"loss": 0.1269,
"step": 600
},
{
"epoch": 0.32485110990795885,
"eval_loss": 0.1084875538945198,
"eval_runtime": 61.8367,
"eval_samples_per_second": 8.086,
"eval_steps_per_second": 0.517,
"step": 600
},
{
"epoch": 0.3302652950730915,
"grad_norm": 0.8203125,
"learning_rate": 3.8728866624921726e-05,
"loss": 0.1448,
"step": 610
},
{
"epoch": 0.33567948023822414,
"grad_norm": 0.90625,
"learning_rate": 3.841577958672511e-05,
"loss": 0.1529,
"step": 620
},
{
"epoch": 0.3410936654033568,
"grad_norm": 0.734375,
"learning_rate": 3.810269254852849e-05,
"loss": 0.1434,
"step": 630
},
{
"epoch": 0.34650785056848943,
"grad_norm": 1.1484375,
"learning_rate": 3.778960551033187e-05,
"loss": 0.1679,
"step": 640
},
{
"epoch": 0.3519220357336221,
"grad_norm": 1.1953125,
"learning_rate": 3.747651847213526e-05,
"loss": 0.1832,
"step": 650
},
{
"epoch": 0.3573362208987547,
"grad_norm": 0.96875,
"learning_rate": 3.716343143393864e-05,
"loss": 0.1634,
"step": 660
},
{
"epoch": 0.36275040606388737,
"grad_norm": 1.4140625,
"learning_rate": 3.685034439574202e-05,
"loss": 0.1799,
"step": 670
},
{
"epoch": 0.36816459122902,
"grad_norm": 0.6875,
"learning_rate": 3.65372573575454e-05,
"loss": 0.1308,
"step": 680
},
{
"epoch": 0.37357877639415266,
"grad_norm": 0.71875,
"learning_rate": 3.6224170319348784e-05,
"loss": 0.1282,
"step": 690
},
{
"epoch": 0.3789929615592853,
"grad_norm": 0.99609375,
"learning_rate": 3.5911083281152166e-05,
"loss": 0.159,
"step": 700
},
{
"epoch": 0.3789929615592853,
"eval_loss": 0.11033277213573456,
"eval_runtime": 59.5923,
"eval_samples_per_second": 8.39,
"eval_steps_per_second": 0.537,
"step": 700
},
{
"epoch": 0.38440714672441795,
"grad_norm": 1.9453125,
"learning_rate": 3.559799624295555e-05,
"loss": 0.1718,
"step": 710
},
{
"epoch": 0.38982133188955065,
"grad_norm": 1.328125,
"learning_rate": 3.528490920475893e-05,
"loss": 0.1584,
"step": 720
},
{
"epoch": 0.3952355170546833,
"grad_norm": 0.431640625,
"learning_rate": 3.497182216656231e-05,
"loss": 0.1556,
"step": 730
},
{
"epoch": 0.40064970221981594,
"grad_norm": 1.484375,
"learning_rate": 3.4658735128365685e-05,
"loss": 0.1379,
"step": 740
},
{
"epoch": 0.4060638873849486,
"grad_norm": 0.84375,
"learning_rate": 3.4345648090169066e-05,
"loss": 0.1542,
"step": 750
},
{
"epoch": 0.41147807255008123,
"grad_norm": 1.2578125,
"learning_rate": 3.403256105197245e-05,
"loss": 0.142,
"step": 760
},
{
"epoch": 0.4168922577152139,
"grad_norm": 0.92578125,
"learning_rate": 3.371947401377583e-05,
"loss": 0.1463,
"step": 770
},
{
"epoch": 0.4223064428803465,
"grad_norm": 1.1484375,
"learning_rate": 3.340638697557921e-05,
"loss": 0.1317,
"step": 780
},
{
"epoch": 0.42772062804547917,
"grad_norm": 1.2890625,
"learning_rate": 3.309329993738259e-05,
"loss": 0.1709,
"step": 790
},
{
"epoch": 0.4331348132106118,
"grad_norm": 0.9296875,
"learning_rate": 3.278021289918597e-05,
"loss": 0.1518,
"step": 800
},
{
"epoch": 0.4331348132106118,
"eval_loss": 0.10848626494407654,
"eval_runtime": 58.5847,
"eval_samples_per_second": 8.535,
"eval_steps_per_second": 0.546,
"step": 800
},
{
"epoch": 0.43854899837574446,
"grad_norm": 0.921875,
"learning_rate": 3.2467125860989355e-05,
"loss": 0.1298,
"step": 810
},
{
"epoch": 0.4439631835408771,
"grad_norm": 1.3046875,
"learning_rate": 3.2154038822792736e-05,
"loss": 0.1291,
"step": 820
},
{
"epoch": 0.44937736870600975,
"grad_norm": 0.70703125,
"learning_rate": 3.184095178459612e-05,
"loss": 0.1715,
"step": 830
},
{
"epoch": 0.4547915538711424,
"grad_norm": 1.375,
"learning_rate": 3.15278647463995e-05,
"loss": 0.1607,
"step": 840
},
{
"epoch": 0.46020573903627504,
"grad_norm": 0.671875,
"learning_rate": 3.121477770820288e-05,
"loss": 0.1261,
"step": 850
},
{
"epoch": 0.4656199242014077,
"grad_norm": 1.3984375,
"learning_rate": 3.090169067000626e-05,
"loss": 0.15,
"step": 860
},
{
"epoch": 0.47103410936654033,
"grad_norm": 0.91015625,
"learning_rate": 3.058860363180964e-05,
"loss": 0.1501,
"step": 870
},
{
"epoch": 0.476448294531673,
"grad_norm": 0.94140625,
"learning_rate": 3.0275516593613024e-05,
"loss": 0.1372,
"step": 880
},
{
"epoch": 0.4818624796968056,
"grad_norm": 1.0,
"learning_rate": 2.9962429555416406e-05,
"loss": 0.1725,
"step": 890
},
{
"epoch": 0.48727666486193827,
"grad_norm": 0.9609375,
"learning_rate": 2.9649342517219787e-05,
"loss": 0.1451,
"step": 900
},
{
"epoch": 0.48727666486193827,
"eval_loss": 0.1076415479183197,
"eval_runtime": 58.8592,
"eval_samples_per_second": 8.495,
"eval_steps_per_second": 0.544,
"step": 900
},
{
"epoch": 0.4926908500270709,
"grad_norm": 1.1796875,
"learning_rate": 2.9336255479023172e-05,
"loss": 0.1423,
"step": 910
},
{
"epoch": 0.49810503519220356,
"grad_norm": 1.109375,
"learning_rate": 2.9023168440826553e-05,
"loss": 0.1326,
"step": 920
},
{
"epoch": 0.5035192203573362,
"grad_norm": 0.9921875,
"learning_rate": 2.8710081402629935e-05,
"loss": 0.141,
"step": 930
},
{
"epoch": 0.5089334055224689,
"grad_norm": 2.015625,
"learning_rate": 2.8396994364433316e-05,
"loss": 0.1311,
"step": 940
},
{
"epoch": 0.5143475906876015,
"grad_norm": 1.234375,
"learning_rate": 2.8083907326236698e-05,
"loss": 0.1154,
"step": 950
},
{
"epoch": 0.5197617758527342,
"grad_norm": 0.78515625,
"learning_rate": 2.777082028804008e-05,
"loss": 0.1433,
"step": 960
},
{
"epoch": 0.5251759610178668,
"grad_norm": 1.546875,
"learning_rate": 2.745773324984346e-05,
"loss": 0.1601,
"step": 970
},
{
"epoch": 0.5305901461829995,
"grad_norm": 1.1796875,
"learning_rate": 2.7144646211646842e-05,
"loss": 0.1343,
"step": 980
},
{
"epoch": 0.5360043313481321,
"grad_norm": 0.953125,
"learning_rate": 2.683155917345022e-05,
"loss": 0.1467,
"step": 990
},
{
"epoch": 0.5414185165132648,
"grad_norm": 0.828125,
"learning_rate": 2.65184721352536e-05,
"loss": 0.1562,
"step": 1000
},
{
"epoch": 0.5414185165132648,
"eval_loss": 0.10366573929786682,
"eval_runtime": 60.3546,
"eval_samples_per_second": 8.284,
"eval_steps_per_second": 0.53,
"step": 1000
},
{
"epoch": 0.5468327016783974,
"grad_norm": 1.578125,
"learning_rate": 2.6205385097056983e-05,
"loss": 0.1299,
"step": 1010
},
{
"epoch": 0.5522468868435301,
"grad_norm": 1.1640625,
"learning_rate": 2.5892298058860364e-05,
"loss": 0.1386,
"step": 1020
},
{
"epoch": 0.5576610720086627,
"grad_norm": 0.9921875,
"learning_rate": 2.5579211020663746e-05,
"loss": 0.1212,
"step": 1030
},
{
"epoch": 0.5630752571737954,
"grad_norm": 0.671875,
"learning_rate": 2.5266123982467127e-05,
"loss": 0.1251,
"step": 1040
},
{
"epoch": 0.568489442338928,
"grad_norm": 1.0625,
"learning_rate": 2.495303694427051e-05,
"loss": 0.1275,
"step": 1050
},
{
"epoch": 0.5739036275040607,
"grad_norm": 0.66796875,
"learning_rate": 2.463994990607389e-05,
"loss": 0.1098,
"step": 1060
},
{
"epoch": 0.5793178126691932,
"grad_norm": 1.265625,
"learning_rate": 2.432686286787727e-05,
"loss": 0.1297,
"step": 1070
},
{
"epoch": 0.584731997834326,
"grad_norm": 1.5625,
"learning_rate": 2.4013775829680653e-05,
"loss": 0.1487,
"step": 1080
},
{
"epoch": 0.5901461829994585,
"grad_norm": 0.94921875,
"learning_rate": 2.3700688791484034e-05,
"loss": 0.1286,
"step": 1090
},
{
"epoch": 0.5955603681645912,
"grad_norm": 0.875,
"learning_rate": 2.3387601753287412e-05,
"loss": 0.1456,
"step": 1100
},
{
"epoch": 0.5955603681645912,
"eval_loss": 0.09888758510351181,
"eval_runtime": 62.4618,
"eval_samples_per_second": 8.005,
"eval_steps_per_second": 0.512,
"step": 1100
},
{
"epoch": 0.6009745533297238,
"grad_norm": 0.88671875,
"learning_rate": 2.3074514715090797e-05,
"loss": 0.1473,
"step": 1110
},
{
"epoch": 0.6063887384948565,
"grad_norm": 1.1953125,
"learning_rate": 2.2761427676894178e-05,
"loss": 0.155,
"step": 1120
},
{
"epoch": 0.6118029236599891,
"grad_norm": 0.83203125,
"learning_rate": 2.244834063869756e-05,
"loss": 0.1251,
"step": 1130
},
{
"epoch": 0.6172171088251218,
"grad_norm": 1.0078125,
"learning_rate": 2.213525360050094e-05,
"loss": 0.1495,
"step": 1140
},
{
"epoch": 0.6226312939902545,
"grad_norm": 2.109375,
"learning_rate": 2.1822166562304323e-05,
"loss": 0.142,
"step": 1150
},
{
"epoch": 0.6280454791553871,
"grad_norm": 1.0859375,
"learning_rate": 2.1509079524107704e-05,
"loss": 0.1303,
"step": 1160
},
{
"epoch": 0.6334596643205198,
"grad_norm": 0.9453125,
"learning_rate": 2.1195992485911085e-05,
"loss": 0.1263,
"step": 1170
},
{
"epoch": 0.6388738494856524,
"grad_norm": 0.84375,
"learning_rate": 2.0882905447714467e-05,
"loss": 0.1133,
"step": 1180
},
{
"epoch": 0.6442880346507851,
"grad_norm": 1.3125,
"learning_rate": 2.0569818409517845e-05,
"loss": 0.1411,
"step": 1190
},
{
"epoch": 0.6497022198159177,
"grad_norm": 0.90234375,
"learning_rate": 2.0256731371321226e-05,
"loss": 0.1571,
"step": 1200
},
{
"epoch": 0.6497022198159177,
"eval_loss": 0.09719575196504593,
"eval_runtime": 61.5385,
"eval_samples_per_second": 8.125,
"eval_steps_per_second": 0.52,
"step": 1200
},
{
"epoch": 0.6551164049810504,
"grad_norm": 0.6328125,
"learning_rate": 1.9943644333124608e-05,
"loss": 0.1521,
"step": 1210
},
{
"epoch": 0.660530590146183,
"grad_norm": 0.70703125,
"learning_rate": 1.963055729492799e-05,
"loss": 0.1128,
"step": 1220
},
{
"epoch": 0.6659447753113157,
"grad_norm": 1.0078125,
"learning_rate": 1.931747025673137e-05,
"loss": 0.134,
"step": 1230
},
{
"epoch": 0.6713589604764483,
"grad_norm": 1.0390625,
"learning_rate": 1.9004383218534755e-05,
"loss": 0.1263,
"step": 1240
},
{
"epoch": 0.676773145641581,
"grad_norm": 0.92578125,
"learning_rate": 1.8691296180338137e-05,
"loss": 0.1273,
"step": 1250
},
{
"epoch": 0.6821873308067136,
"grad_norm": 0.75,
"learning_rate": 1.8378209142141518e-05,
"loss": 0.1373,
"step": 1260
},
{
"epoch": 0.6876015159718463,
"grad_norm": 0.58984375,
"learning_rate": 1.80651221039449e-05,
"loss": 0.1242,
"step": 1270
},
{
"epoch": 0.6930157011369789,
"grad_norm": 0.40234375,
"learning_rate": 1.775203506574828e-05,
"loss": 0.1424,
"step": 1280
},
{
"epoch": 0.6984298863021116,
"grad_norm": 0.984375,
"learning_rate": 1.743894802755166e-05,
"loss": 0.1053,
"step": 1290
},
{
"epoch": 0.7038440714672441,
"grad_norm": 0.97265625,
"learning_rate": 1.712586098935504e-05,
"loss": 0.1195,
"step": 1300
},
{
"epoch": 0.7038440714672441,
"eval_loss": 0.09898315370082855,
"eval_runtime": 59.819,
"eval_samples_per_second": 8.359,
"eval_steps_per_second": 0.535,
"step": 1300
},
{
"epoch": 0.7092582566323768,
"grad_norm": 0.5625,
"learning_rate": 1.681277395115842e-05,
"loss": 0.1339,
"step": 1310
},
{
"epoch": 0.7146724417975094,
"grad_norm": 0.92578125,
"learning_rate": 1.6499686912961803e-05,
"loss": 0.155,
"step": 1320
},
{
"epoch": 0.7200866269626421,
"grad_norm": 1.421875,
"learning_rate": 1.6186599874765184e-05,
"loss": 0.1275,
"step": 1330
},
{
"epoch": 0.7255008121277747,
"grad_norm": 1.25,
"learning_rate": 1.5873512836568566e-05,
"loss": 0.1454,
"step": 1340
},
{
"epoch": 0.7309149972929074,
"grad_norm": 1.0078125,
"learning_rate": 1.5560425798371947e-05,
"loss": 0.1184,
"step": 1350
},
{
"epoch": 0.73632918245804,
"grad_norm": 0.546875,
"learning_rate": 1.5247338760175329e-05,
"loss": 0.1231,
"step": 1360
},
{
"epoch": 0.7417433676231727,
"grad_norm": 0.77734375,
"learning_rate": 1.4934251721978712e-05,
"loss": 0.1245,
"step": 1370
},
{
"epoch": 0.7471575527883053,
"grad_norm": 1.1484375,
"learning_rate": 1.4621164683782093e-05,
"loss": 0.1388,
"step": 1380
},
{
"epoch": 0.752571737953438,
"grad_norm": 1.234375,
"learning_rate": 1.4308077645585475e-05,
"loss": 0.1599,
"step": 1390
},
{
"epoch": 0.7579859231185706,
"grad_norm": 1.2265625,
"learning_rate": 1.3994990607388856e-05,
"loss": 0.1481,
"step": 1400
},
{
"epoch": 0.7579859231185706,
"eval_loss": 0.09811025857925415,
"eval_runtime": 58.4243,
"eval_samples_per_second": 8.558,
"eval_steps_per_second": 0.548,
"step": 1400
},
{
"epoch": 0.7634001082837033,
"grad_norm": 0.72265625,
"learning_rate": 1.3681903569192236e-05,
"loss": 0.1228,
"step": 1410
},
{
"epoch": 0.7688142934488359,
"grad_norm": 0.8359375,
"learning_rate": 1.3368816530995617e-05,
"loss": 0.1356,
"step": 1420
},
{
"epoch": 0.7742284786139686,
"grad_norm": 1.046875,
"learning_rate": 1.3055729492798999e-05,
"loss": 0.1431,
"step": 1430
},
{
"epoch": 0.7796426637791013,
"grad_norm": 0.9140625,
"learning_rate": 1.274264245460238e-05,
"loss": 0.1154,
"step": 1440
},
{
"epoch": 0.7850568489442339,
"grad_norm": 0.76953125,
"learning_rate": 1.2429555416405761e-05,
"loss": 0.1106,
"step": 1450
},
{
"epoch": 0.7904710341093666,
"grad_norm": 0.8046875,
"learning_rate": 1.2116468378209143e-05,
"loss": 0.1172,
"step": 1460
},
{
"epoch": 0.7958852192744992,
"grad_norm": 0.7578125,
"learning_rate": 1.1803381340012524e-05,
"loss": 0.1424,
"step": 1470
},
{
"epoch": 0.8012994044396319,
"grad_norm": 1.0,
"learning_rate": 1.1490294301815906e-05,
"loss": 0.1328,
"step": 1480
},
{
"epoch": 0.8067135896047645,
"grad_norm": 2.171875,
"learning_rate": 1.1177207263619287e-05,
"loss": 0.1363,
"step": 1490
},
{
"epoch": 0.8121277747698972,
"grad_norm": 0.88671875,
"learning_rate": 1.0864120225422668e-05,
"loss": 0.1255,
"step": 1500
},
{
"epoch": 0.8121277747698972,
"eval_loss": 0.0966864824295044,
"eval_runtime": 61.9578,
"eval_samples_per_second": 8.07,
"eval_steps_per_second": 0.516,
"step": 1500
},
{
"epoch": 0.8175419599350298,
"grad_norm": 1.1171875,
"learning_rate": 1.0551033187226048e-05,
"loss": 0.1397,
"step": 1510
},
{
"epoch": 0.8229561451001625,
"grad_norm": 1.421875,
"learning_rate": 1.023794614902943e-05,
"loss": 0.1289,
"step": 1520
},
{
"epoch": 0.828370330265295,
"grad_norm": 1.34375,
"learning_rate": 9.924859110832813e-06,
"loss": 0.1231,
"step": 1530
},
{
"epoch": 0.8337845154304278,
"grad_norm": 1.15625,
"learning_rate": 9.611772072636194e-06,
"loss": 0.1477,
"step": 1540
},
{
"epoch": 0.8391987005955603,
"grad_norm": 0.59765625,
"learning_rate": 9.298685034439576e-06,
"loss": 0.1228,
"step": 1550
},
{
"epoch": 0.844612885760693,
"grad_norm": 1.59375,
"learning_rate": 8.985597996242955e-06,
"loss": 0.1745,
"step": 1560
},
{
"epoch": 0.8500270709258256,
"grad_norm": 0.859375,
"learning_rate": 8.672510958046337e-06,
"loss": 0.1565,
"step": 1570
},
{
"epoch": 0.8554412560909583,
"grad_norm": 0.95703125,
"learning_rate": 8.359423919849718e-06,
"loss": 0.1503,
"step": 1580
},
{
"epoch": 0.8608554412560909,
"grad_norm": 1.6875,
"learning_rate": 8.0463368816531e-06,
"loss": 0.1569,
"step": 1590
},
{
"epoch": 0.8662696264212236,
"grad_norm": 0.96875,
"learning_rate": 7.733249843456483e-06,
"loss": 0.1408,
"step": 1600
},
{
"epoch": 0.8662696264212236,
"eval_loss": 0.09672338515520096,
"eval_runtime": 60.7387,
"eval_samples_per_second": 8.232,
"eval_steps_per_second": 0.527,
"step": 1600
},
{
"epoch": 0.8716838115863562,
"grad_norm": 1.078125,
"learning_rate": 7.420162805259863e-06,
"loss": 0.1313,
"step": 1610
},
{
"epoch": 0.8770979967514889,
"grad_norm": 0.482421875,
"learning_rate": 7.107075767063244e-06,
"loss": 0.1506,
"step": 1620
},
{
"epoch": 0.8825121819166215,
"grad_norm": 0.796875,
"learning_rate": 6.793988728866625e-06,
"loss": 0.1274,
"step": 1630
},
{
"epoch": 0.8879263670817542,
"grad_norm": 1.515625,
"learning_rate": 6.4809016906700065e-06,
"loss": 0.1471,
"step": 1640
},
{
"epoch": 0.8933405522468868,
"grad_norm": 0.6875,
"learning_rate": 6.167814652473388e-06,
"loss": 0.142,
"step": 1650
},
{
"epoch": 0.8987547374120195,
"grad_norm": 1.296875,
"learning_rate": 5.854727614276769e-06,
"loss": 0.1084,
"step": 1660
},
{
"epoch": 0.9041689225771521,
"grad_norm": 0.97265625,
"learning_rate": 5.54164057608015e-06,
"loss": 0.1044,
"step": 1670
},
{
"epoch": 0.9095831077422848,
"grad_norm": 0.96484375,
"learning_rate": 5.228553537883532e-06,
"loss": 0.147,
"step": 1680
},
{
"epoch": 0.9149972929074174,
"grad_norm": 0.4765625,
"learning_rate": 4.9154664996869136e-06,
"loss": 0.1254,
"step": 1690
},
{
"epoch": 0.9204114780725501,
"grad_norm": 0.7109375,
"learning_rate": 4.602379461490294e-06,
"loss": 0.1208,
"step": 1700
},
{
"epoch": 0.9204114780725501,
"eval_loss": 0.09627044945955276,
"eval_runtime": 61.5631,
"eval_samples_per_second": 8.122,
"eval_steps_per_second": 0.52,
"step": 1700
},
{
"epoch": 0.9258256632376828,
"grad_norm": 1.015625,
"learning_rate": 4.289292423293676e-06,
"loss": 0.1468,
"step": 1710
},
{
"epoch": 0.9312398484028154,
"grad_norm": 1.0546875,
"learning_rate": 3.976205385097057e-06,
"loss": 0.1231,
"step": 1720
},
{
"epoch": 0.9366540335679481,
"grad_norm": 1.4453125,
"learning_rate": 3.6631183469004384e-06,
"loss": 0.1548,
"step": 1730
},
{
"epoch": 0.9420682187330807,
"grad_norm": 1.125,
"learning_rate": 3.35003130870382e-06,
"loss": 0.1332,
"step": 1740
},
{
"epoch": 0.9474824038982134,
"grad_norm": 0.84765625,
"learning_rate": 3.036944270507201e-06,
"loss": 0.1382,
"step": 1750
},
{
"epoch": 0.952896589063346,
"grad_norm": 0.72265625,
"learning_rate": 2.7238572323105826e-06,
"loss": 0.1203,
"step": 1760
},
{
"epoch": 0.9583107742284787,
"grad_norm": 0.96875,
"learning_rate": 2.410770194113964e-06,
"loss": 0.1206,
"step": 1770
},
{
"epoch": 0.9637249593936112,
"grad_norm": 1.2578125,
"learning_rate": 2.0976831559173454e-06,
"loss": 0.1512,
"step": 1780
},
{
"epoch": 0.969139144558744,
"grad_norm": 1.21875,
"learning_rate": 1.7845961177207264e-06,
"loss": 0.1522,
"step": 1790
},
{
"epoch": 0.9745533297238765,
"grad_norm": 0.63671875,
"learning_rate": 1.4715090795241078e-06,
"loss": 0.1228,
"step": 1800
},
{
"epoch": 0.9745533297238765,
"eval_loss": 0.09588468819856644,
"eval_runtime": 61.1963,
"eval_samples_per_second": 8.17,
"eval_steps_per_second": 0.523,
"step": 1800
},
{
"epoch": 0.9799675148890092,
"grad_norm": 0.609375,
"learning_rate": 1.1584220413274892e-06,
"loss": 0.1349,
"step": 1810
},
{
"epoch": 0.9853817000541418,
"grad_norm": 0.84765625,
"learning_rate": 8.453350031308704e-07,
"loss": 0.1268,
"step": 1820
},
{
"epoch": 0.9907958852192745,
"grad_norm": 1.4609375,
"learning_rate": 5.322479649342517e-07,
"loss": 0.1416,
"step": 1830
},
{
"epoch": 0.9962100703844071,
"grad_norm": 1.03125,
"learning_rate": 2.1916092673763307e-07,
"loss": 0.1308,
"step": 1840
}
],
"logging_steps": 10,
"max_steps": 1847,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1050860758570842e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
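
The log_history above interleaves training entries (every 10 steps, per logging_steps) with evaluation entries (every 100 steps, per eval_steps). As an illustrative sketch only, not part of the checkpoint itself, the short Python snippet below shows one way such a trainer_state.json could be loaded and the train/eval loss curves plotted; the file path is an assumption, and matplotlib is an extra dependency.

import json

import matplotlib.pyplot as plt

# Assumed path; point this at the checkpoint directory of interest.
with open("checkpoint-1847/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train), label="train loss")
plt.plot(*zip(*evals), marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()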