heuristic_snyder / checkpoint-251 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.10626587637595258,
"global_step": 251,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.1666666666666665e-05,
"loss": 3.0643,
"theoretical_loss": 3.321567680436603,
"tokens_seen": 2990538752
},
{
"epoch": 0.0,
"learning_rate": 8.333333333333333e-05,
"loss": 3.0798,
"theoretical_loss": 3.3215564803546,
"tokens_seen": 2990669824
},
{
"epoch": 0.0,
"learning_rate": 0.000125,
"loss": 2.8688,
"theoretical_loss": 3.321545280900887,
"tokens_seen": 2990800896
},
{
"epoch": 0.0,
"learning_rate": 0.00016666666666666666,
"loss": 2.7194,
"theoretical_loss": 3.3215340820754022,
"tokens_seen": 2990931968
},
{
"epoch": 0.0,
"learning_rate": 0.00020833333333333335,
"loss": 2.6193,
"theoretical_loss": 3.3215228838780817,
"tokens_seen": 2991063040
},
{
"epoch": 0.0,
"learning_rate": 0.00025,
"loss": 2.8571,
"theoretical_loss": 3.3215116863088636,
"tokens_seen": 2991194112
},
{
"epoch": 0.0,
"learning_rate": 0.0002916666666666667,
"loss": 2.7571,
"theoretical_loss": 3.3215004893676854,
"tokens_seen": 2991325184
},
{
"epoch": 0.0,
"learning_rate": 0.0003333333333333333,
"loss": 2.8877,
"theoretical_loss": 3.321489293054483,
"tokens_seen": 2991456256
},
{
"epoch": 0.0,
"learning_rate": 0.000375,
"loss": 2.714,
"theoretical_loss": 3.321478097369195,
"tokens_seen": 2991587328
},
{
"epoch": 0.0,
"learning_rate": 0.0004166666666666667,
"loss": 2.6564,
"theoretical_loss": 3.321466902311758,
"tokens_seen": 2991718400
},
{
"epoch": 0.0,
"learning_rate": 0.0004583333333333333,
"loss": 2.5638,
"theoretical_loss": 3.3214557078821096,
"tokens_seen": 2991849472
},
{
"epoch": 0.01,
"learning_rate": 0.0005,
"loss": 2.4354,
"theoretical_loss": 3.321444514080187,
"tokens_seen": 2991980544
},
{
"epoch": 0.01,
"objective/train/docs_used": 1640856,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5749809741973877,
"objective/train/theoretical_loss": 3.321438917414603,
"objective/train/tokens_used": 22097376,
"theoretical_loss": 3.321438917414603,
"tokens_seen": 2992046080
},
{
"epoch": 0.01,
"learning_rate": 0.0005416666666666666,
"loss": 2.5713,
"theoretical_loss": 3.321433320905927,
"tokens_seen": 2992111616
},
{
"epoch": 0.01,
"learning_rate": 0.0005833333333333334,
"loss": 2.4812,
"theoretical_loss": 3.3214221283592678,
"tokens_seen": 2992242688
},
{
"epoch": 0.01,
"learning_rate": 0.000625,
"loss": 2.7622,
"theoretical_loss": 3.321410936440146,
"tokens_seen": 2992373760
},
{
"epoch": 0.01,
"learning_rate": 0.0006666666666666666,
"loss": 2.609,
"theoretical_loss": 3.3213997451485,
"tokens_seen": 2992504832
},
{
"epoch": 0.01,
"learning_rate": 0.0007083333333333334,
"loss": 2.4537,
"theoretical_loss": 3.3213885544842654,
"tokens_seen": 2992635904
},
{
"epoch": 0.01,
"learning_rate": 0.00075,
"loss": 2.4831,
"theoretical_loss": 3.321377364447381,
"tokens_seen": 2992766976
},
{
"epoch": 0.01,
"learning_rate": 0.0007916666666666666,
"loss": 2.5607,
"theoretical_loss": 3.3213661750377836,
"tokens_seen": 2992898048
},
{
"epoch": 0.01,
"learning_rate": 0.0008333333333333334,
"loss": 2.5759,
"theoretical_loss": 3.3213549862554106,
"tokens_seen": 2993029120
},
{
"epoch": 0.01,
"learning_rate": 0.000875,
"loss": 2.3106,
"theoretical_loss": 3.3213437981001994,
"tokens_seen": 2993160192
},
{
"epoch": 0.01,
"learning_rate": 0.0009166666666666666,
"loss": 2.5471,
"theoretical_loss": 3.3213326105720875,
"tokens_seen": 2993291264
},
{
"epoch": 0.01,
"learning_rate": 0.0009583333333333334,
"loss": 2.602,
"theoretical_loss": 3.3213214236710122,
"tokens_seen": 2993422336
},
{
"epoch": 0.01,
"learning_rate": 0.001,
"loss": 2.5877,
"theoretical_loss": 3.321310237396911,
"tokens_seen": 2993553408
},
{
"epoch": 0.01,
"objective/train/docs_used": 1641461,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.557373523712158,
"objective/train/theoretical_loss": 3.3212990517497207,
"objective/train/tokens_used": 23735776,
"theoretical_loss": 3.3212990517497207,
"tokens_seen": 2993684480
},
{
"epoch": 0.01,
"learning_rate": 0.0009995722840034217,
"loss": 2.7042,
"theoretical_loss": 3.3212990517497207,
"tokens_seen": 2993684480
},
{
"epoch": 0.01,
"learning_rate": 0.0009991445680068436,
"loss": 2.5234,
"theoretical_loss": 3.3212878667293797,
"tokens_seen": 2993815552
},
{
"epoch": 0.01,
"learning_rate": 0.0009987168520102653,
"loss": 2.5502,
"theoretical_loss": 3.321276682335825,
"tokens_seen": 2993946624
},
{
"epoch": 0.01,
"learning_rate": 0.000998289136013687,
"loss": 2.6849,
"theoretical_loss": 3.3212654985689936,
"tokens_seen": 2994077696
},
{
"epoch": 0.01,
"learning_rate": 0.0009978614200171086,
"loss": 2.6348,
"theoretical_loss": 3.3212543154288237,
"tokens_seen": 2994208768
},
{
"epoch": 0.01,
"learning_rate": 0.0009974337040205303,
"loss": 2.6793,
"theoretical_loss": 3.3212431329152525,
"tokens_seen": 2994339840
},
{
"epoch": 0.01,
"learning_rate": 0.0009970059880239522,
"loss": 2.6212,
"theoretical_loss": 3.321231951028217,
"tokens_seen": 2994470912
},
{
"epoch": 0.01,
"learning_rate": 0.0009965782720273739,
"loss": 2.629,
"theoretical_loss": 3.3212207697676552,
"tokens_seen": 2994601984
},
{
"epoch": 0.01,
"learning_rate": 0.0009961505560307955,
"loss": 2.5865,
"theoretical_loss": 3.3212095891335043,
"tokens_seen": 2994733056
},
{
"epoch": 0.01,
"learning_rate": 0.0009957228400342174,
"loss": 2.667,
"theoretical_loss": 3.321198409125702,
"tokens_seen": 2994864128
},
{
"epoch": 0.01,
"learning_rate": 0.000995295124037639,
"loss": 2.55,
"theoretical_loss": 3.321187229744186,
"tokens_seen": 2994995200
},
{
"epoch": 0.02,
"learning_rate": 0.0009948674080410608,
"loss": 2.6324,
"theoretical_loss": 3.321176050988893,
"tokens_seen": 2995126272
},
{
"epoch": 0.02,
"learning_rate": 0.0009944396920444824,
"loss": 2.8406,
"theoretical_loss": 3.3211648728597614,
"tokens_seen": 2995257344
},
{
"epoch": 0.02,
"objective/train/docs_used": 1642666,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8021185398101807,
"objective/train/theoretical_loss": 3.3211592840299864,
"objective/train/tokens_used": 25374176,
"theoretical_loss": 3.3211592840299864,
"tokens_seen": 2995322880
},
{
"epoch": 0.02,
"learning_rate": 0.0009940119760479041,
"loss": 2.5438,
"theoretical_loss": 3.3211536953567284,
"tokens_seen": 2995388416
},
{
"epoch": 0.02,
"learning_rate": 0.000993584260051326,
"loss": 2.7618,
"theoretical_loss": 3.321142518479731,
"tokens_seen": 2995519488
},
{
"epoch": 0.02,
"learning_rate": 0.0009931565440547477,
"loss": 2.6638,
"theoretical_loss": 3.321131342228708,
"tokens_seen": 2995650560
},
{
"epoch": 0.02,
"learning_rate": 0.0009927288280581694,
"loss": 2.7287,
"theoretical_loss": 3.321120166603596,
"tokens_seen": 2995781632
},
{
"epoch": 0.02,
"learning_rate": 0.000992301112061591,
"loss": 2.5832,
"theoretical_loss": 3.3211089916043326,
"tokens_seen": 2995912704
},
{
"epoch": 0.02,
"learning_rate": 0.0009918733960650127,
"loss": 2.6355,
"theoretical_loss": 3.3210978172308554,
"tokens_seen": 2996043776
},
{
"epoch": 0.02,
"learning_rate": 0.0009914456800684346,
"loss": 2.5139,
"theoretical_loss": 3.3210866434831026,
"tokens_seen": 2996174848
},
{
"epoch": 0.02,
"learning_rate": 0.0009910179640718563,
"loss": 2.5879,
"theoretical_loss": 3.3210754703610106,
"tokens_seen": 2996305920
},
{
"epoch": 0.02,
"learning_rate": 0.0009905902480752782,
"loss": 2.5935,
"theoretical_loss": 3.321064297864518,
"tokens_seen": 2996436992
},
{
"epoch": 0.02,
"learning_rate": 0.0009901625320786998,
"loss": 2.6176,
"theoretical_loss": 3.3210531259935627,
"tokens_seen": 2996568064
},
{
"epoch": 0.02,
"learning_rate": 0.0009897348160821215,
"loss": 2.6405,
"theoretical_loss": 3.321041954748081,
"tokens_seen": 2996699136
},
{
"epoch": 0.02,
"learning_rate": 0.0009893071000855432,
"loss": 2.7274,
"theoretical_loss": 3.321030784128012,
"tokens_seen": 2996830208
},
{
"epoch": 0.02,
"objective/train/docs_used": 1643300,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 3.023165464401245,
"objective/train/theoretical_loss": 3.321019614133292,
"objective/train/tokens_used": 27012576,
"theoretical_loss": 3.321019614133292,
"tokens_seen": 2996961280
},
{
"epoch": 0.02,
"learning_rate": 0.0009888793840889649,
"loss": 2.7003,
"theoretical_loss": 3.321019614133292,
"tokens_seen": 2996961280
},
{
"epoch": 0.02,
"learning_rate": 0.0009884516680923865,
"loss": 2.7436,
"theoretical_loss": 3.3210084447638595,
"tokens_seen": 2997092352
},
{
"epoch": 0.02,
"learning_rate": 0.0009880239520958084,
"loss": 2.5873,
"theoretical_loss": 3.320997276019652,
"tokens_seen": 2997223424
},
{
"epoch": 0.02,
"learning_rate": 0.00098759623609923,
"loss": 2.5974,
"theoretical_loss": 3.3209861079006067,
"tokens_seen": 2997354496
},
{
"epoch": 0.02,
"learning_rate": 0.000987168520102652,
"loss": 2.5806,
"theoretical_loss": 3.320974940406662,
"tokens_seen": 2997485568
},
{
"epoch": 0.02,
"learning_rate": 0.0009867408041060737,
"loss": 2.6771,
"theoretical_loss": 3.320963773537755,
"tokens_seen": 2997616640
},
{
"epoch": 0.02,
"learning_rate": 0.0009863130881094953,
"loss": 2.7313,
"theoretical_loss": 3.320952607293824,
"tokens_seen": 2997747712
},
{
"epoch": 0.02,
"learning_rate": 0.000985885372112917,
"loss": 2.7302,
"theoretical_loss": 3.320941441674806,
"tokens_seen": 2997878784
},
{
"epoch": 0.02,
"learning_rate": 0.0009854576561163387,
"loss": 2.893,
"theoretical_loss": 3.320930276680639,
"tokens_seen": 2998009856
},
{
"epoch": 0.02,
"learning_rate": 0.0009850299401197606,
"loss": 2.6886,
"theoretical_loss": 3.3209191123112607,
"tokens_seen": 2998140928
},
{
"epoch": 0.03,
"learning_rate": 0.0009846022241231823,
"loss": 2.7801,
"theoretical_loss": 3.320907948566609,
"tokens_seen": 2998272000
},
{
"epoch": 0.03,
"learning_rate": 0.000984174508126604,
"loss": 2.6538,
"theoretical_loss": 3.3208967854466214,
"tokens_seen": 2998403072
},
{
"epoch": 0.03,
"learning_rate": 0.0009837467921300258,
"loss": 2.5705,
"theoretical_loss": 3.3208856229512356,
"tokens_seen": 2998534144
},
{
"epoch": 0.03,
"objective/train/docs_used": 1644380,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.542499542236328,
"objective/train/theoretical_loss": 3.320880041937749,
"objective/train/tokens_used": 28650976,
"theoretical_loss": 3.320880041937749,
"tokens_seen": 2998599680
},
{
"epoch": 0.03,
"learning_rate": 0.0009833190761334475,
"loss": 2.5074,
"theoretical_loss": 3.3208744610803898,
"tokens_seen": 2998665216
},
{
"epoch": 0.03,
"learning_rate": 0.0009828913601368692,
"loss": 2.6151,
"theoretical_loss": 3.320863299834021,
"tokens_seen": 2998796288
},
{
"epoch": 0.03,
"learning_rate": 0.0009824636441402908,
"loss": 2.6994,
"theoretical_loss": 3.320852139212068,
"tokens_seen": 2998927360
},
{
"epoch": 0.03,
"learning_rate": 0.0009820359281437125,
"loss": 2.568,
"theoretical_loss": 3.3208409792144677,
"tokens_seen": 2999058432
},
{
"epoch": 0.03,
"learning_rate": 0.0009816082121471344,
"loss": 2.5552,
"theoretical_loss": 3.320829819841158,
"tokens_seen": 2999189504
},
{
"epoch": 0.03,
"learning_rate": 0.000981180496150556,
"loss": 2.6719,
"theoretical_loss": 3.320818661092077,
"tokens_seen": 2999320576
},
{
"epoch": 0.03,
"learning_rate": 0.0009807527801539778,
"loss": 2.5567,
"theoretical_loss": 3.3208075029671624,
"tokens_seen": 2999451648
},
{
"epoch": 0.03,
"learning_rate": 0.0009803250641573994,
"loss": 2.5511,
"theoretical_loss": 3.320796345466352,
"tokens_seen": 2999582720
},
{
"epoch": 0.03,
"learning_rate": 0.0009798973481608211,
"loss": 2.6608,
"theoretical_loss": 3.320785188589584,
"tokens_seen": 2999713792
},
{
"epoch": 0.03,
"learning_rate": 0.000979469632164243,
"loss": 2.5947,
"theoretical_loss": 3.3207740323367956,
"tokens_seen": 2999844864
},
{
"epoch": 0.03,
"learning_rate": 0.0009790419161676647,
"loss": 2.6511,
"theoretical_loss": 3.3207628767079242,
"tokens_seen": 2999975936
},
{
"epoch": 0.03,
"learning_rate": 0.0009786142001710863,
"loss": 2.5573,
"theoretical_loss": 3.3207517217029094,
"tokens_seen": 3000107008
},
{
"epoch": 0.03,
"objective/train/docs_used": 1645056,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4763801097869873,
"objective/train/theoretical_loss": 3.3207405673216877,
"objective/train/tokens_used": 30289376,
"theoretical_loss": 3.3207405673216877,
"tokens_seen": 3000238080
},
{
"epoch": 0.03,
"learning_rate": 0.0009781864841745082,
"loss": 2.6745,
"theoretical_loss": 3.3207405673216877,
"tokens_seen": 3000238080
},
{
"epoch": 0.03,
"learning_rate": 0.00097775876817793,
"loss": 2.697,
"theoretical_loss": 3.320729413564197,
"tokens_seen": 3000369152
},
{
"epoch": 0.03,
"learning_rate": 0.0009773310521813516,
"loss": 2.6853,
"theoretical_loss": 3.3207182604303753,
"tokens_seen": 3000500224
},
{
"epoch": 0.03,
"learning_rate": 0.0009769033361847733,
"loss": 2.3445,
"theoretical_loss": 3.320707107920161,
"tokens_seen": 3000631296
},
{
"epoch": 0.03,
"learning_rate": 0.000976475620188195,
"loss": 2.6763,
"theoretical_loss": 3.3206959560334917,
"tokens_seen": 3000762368
},
{
"epoch": 0.03,
"learning_rate": 0.0009760479041916168,
"loss": 2.5198,
"theoretical_loss": 3.320684804770305,
"tokens_seen": 3000893440
},
{
"epoch": 0.03,
"learning_rate": 0.0009756201881950385,
"loss": 2.7,
"theoretical_loss": 3.3206736541305393,
"tokens_seen": 3001024512
},
{
"epoch": 0.03,
"learning_rate": 0.0009751924721984602,
"loss": 2.6958,
"theoretical_loss": 3.3206625041141318,
"tokens_seen": 3001155584
},
{
"epoch": 0.04,
"learning_rate": 0.000974764756201882,
"loss": 2.6457,
"theoretical_loss": 3.3206513547210212,
"tokens_seen": 3001286656
},
{
"epoch": 0.04,
"learning_rate": 0.0009743370402053036,
"loss": 2.7946,
"theoretical_loss": 3.320640205951145,
"tokens_seen": 3001417728
},
{
"epoch": 0.04,
"learning_rate": 0.0009739093242087254,
"loss": 2.6682,
"theoretical_loss": 3.3206290578044415,
"tokens_seen": 3001548800
},
{
"epoch": 0.04,
"learning_rate": 0.0009734816082121472,
"loss": 2.5484,
"theoretical_loss": 3.3206179102808484,
"tokens_seen": 3001679872
},
{
"epoch": 0.04,
"learning_rate": 0.0009730538922155689,
"loss": 2.6724,
"theoretical_loss": 3.3206067633803036,
"tokens_seen": 3001810944
},
{
"epoch": 0.04,
"objective/train/docs_used": 1646327,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.547891855239868,
"objective/train/theoretical_loss": 3.320601190163655,
"objective/train/tokens_used": 31927776,
"theoretical_loss": 3.320601190163655,
"tokens_seen": 3001876480
},
{
"epoch": 0.04,
"learning_rate": 0.0009726261762189907,
"loss": 2.5267,
"theoretical_loss": 3.320595617102745,
"tokens_seen": 3001942016
},
{
"epoch": 0.04,
"learning_rate": 0.0009721984602224123,
"loss": 2.6548,
"theoretical_loss": 3.320584471448111,
"tokens_seen": 3002073088
},
{
"epoch": 0.04,
"learning_rate": 0.0009717707442258341,
"loss": 2.5147,
"theoretical_loss": 3.3205733264163393,
"tokens_seen": 3002204160
},
{
"epoch": 0.04,
"learning_rate": 0.0009713430282292558,
"loss": 2.4505,
"theoretical_loss": 3.320562182007368,
"tokens_seen": 3002335232
},
{
"epoch": 0.04,
"learning_rate": 0.0009709153122326775,
"loss": 2.6305,
"theoretical_loss": 3.320551038221135,
"tokens_seen": 3002466304
},
{
"epoch": 0.04,
"learning_rate": 0.0009704875962360993,
"loss": 2.5482,
"theoretical_loss": 3.3205398950575784,
"tokens_seen": 3002597376
},
{
"epoch": 0.04,
"learning_rate": 0.0009700598802395209,
"loss": 2.7266,
"theoretical_loss": 3.320528752516636,
"tokens_seen": 3002728448
},
{
"epoch": 0.04,
"learning_rate": 0.0009696321642429427,
"loss": 2.5155,
"theoretical_loss": 3.3205176105982463,
"tokens_seen": 3002859520
},
{
"epoch": 0.04,
"learning_rate": 0.0009692044482463645,
"loss": 2.7628,
"theoretical_loss": 3.320506469302347,
"tokens_seen": 3002990592
},
{
"epoch": 0.04,
"learning_rate": 0.0009687767322497862,
"loss": 2.6802,
"theoretical_loss": 3.3204953286288763,
"tokens_seen": 3003121664
},
{
"epoch": 0.04,
"learning_rate": 0.000968349016253208,
"loss": 2.7921,
"theoretical_loss": 3.3204841885777725,
"tokens_seen": 3003252736
},
{
"epoch": 0.04,
"learning_rate": 0.0009679213002566296,
"loss": 2.6088,
"theoretical_loss": 3.3204730491489727,
"tokens_seen": 3003383808
},
{
"epoch": 0.04,
"objective/train/docs_used": 1647543,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5402848720550537,
"objective/train/theoretical_loss": 3.3204619103424164,
"objective/train/tokens_used": 33566176,
"theoretical_loss": 3.3204619103424164,
"tokens_seen": 3003514880
},
{
"epoch": 0.04,
"learning_rate": 0.0009674935842600513,
"loss": 2.6446,
"theoretical_loss": 3.3204619103424164,
"tokens_seen": 3003514880
},
{
"epoch": 0.04,
"learning_rate": 0.0009670658682634731,
"loss": 2.5202,
"theoretical_loss": 3.3204507721580403,
"tokens_seen": 3003645952
},
{
"epoch": 0.04,
"learning_rate": 0.0009666381522668948,
"loss": 2.4832,
"theoretical_loss": 3.3204396345957834,
"tokens_seen": 3003777024
},
{
"epoch": 0.04,
"learning_rate": 0.0009662104362703165,
"loss": 2.6544,
"theoretical_loss": 3.320428497655584,
"tokens_seen": 3003908096
},
{
"epoch": 0.04,
"learning_rate": 0.0009657827202737382,
"loss": 2.7507,
"theoretical_loss": 3.320417361337379,
"tokens_seen": 3004039168
},
{
"epoch": 0.04,
"learning_rate": 0.00096535500427716,
"loss": 2.5786,
"theoretical_loss": 3.3204062256411078,
"tokens_seen": 3004170240
},
{
"epoch": 0.04,
"learning_rate": 0.0009649272882805818,
"loss": 2.6649,
"theoretical_loss": 3.320395090566708,
"tokens_seen": 3004301312
},
{
"epoch": 0.05,
"learning_rate": 0.0009644995722840035,
"loss": 2.6338,
"theoretical_loss": 3.3203839561141173,
"tokens_seen": 3004432384
},
{
"epoch": 0.05,
"learning_rate": 0.0009640718562874252,
"loss": 2.664,
"theoretical_loss": 3.320372822283275,
"tokens_seen": 3004563456
},
{
"epoch": 0.05,
"learning_rate": 0.0009636441402908469,
"loss": 2.6909,
"theoretical_loss": 3.3203616890741183,
"tokens_seen": 3004694528
},
{
"epoch": 0.05,
"learning_rate": 0.0009632164242942686,
"loss": 2.4478,
"theoretical_loss": 3.3203505564865856,
"tokens_seen": 3004825600
},
{
"epoch": 0.05,
"learning_rate": 0.0009627887082976904,
"loss": 2.6265,
"theoretical_loss": 3.3203394245206153,
"tokens_seen": 3004956672
},
{
"epoch": 0.05,
"learning_rate": 0.000962360992301112,
"loss": 2.5755,
"theoretical_loss": 3.320328293176145,
"tokens_seen": 3005087744
},
{
"epoch": 0.05,
"objective/train/docs_used": 1648109,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5592408180236816,
"objective/train/theoretical_loss": 3.3203227277369534,
"objective/train/tokens_used": 35204576,
"theoretical_loss": 3.3203227277369534,
"tokens_seen": 3005153280
},
{
"epoch": 0.05,
"learning_rate": 0.0009619332763045337,
"loss": 2.567,
"theoretical_loss": 3.320317162453114,
"tokens_seen": 3005218816
},
{
"epoch": 0.05,
"learning_rate": 0.0009615055603079555,
"loss": 2.6035,
"theoretical_loss": 3.3203060323514593,
"tokens_seen": 3005349888
},
{
"epoch": 0.05,
"learning_rate": 0.0009610778443113773,
"loss": 2.6209,
"theoretical_loss": 3.3202949028711197,
"tokens_seen": 3005480960
},
{
"epoch": 0.05,
"learning_rate": 0.0009606501283147991,
"loss": 2.522,
"theoretical_loss": 3.3202837740120335,
"tokens_seen": 3005612032
},
{
"epoch": 0.05,
"learning_rate": 0.0009602224123182207,
"loss": 2.5764,
"theoretical_loss": 3.3202726457741387,
"tokens_seen": 3005743104
},
{
"epoch": 0.05,
"learning_rate": 0.0009597946963216424,
"loss": 2.6805,
"theoretical_loss": 3.320261518157374,
"tokens_seen": 3005874176
},
{
"epoch": 0.05,
"learning_rate": 0.0009593669803250642,
"loss": 2.4835,
"theoretical_loss": 3.3202503911616765,
"tokens_seen": 3006005248
},
{
"epoch": 0.05,
"learning_rate": 0.0009589392643284859,
"loss": 2.5907,
"theoretical_loss": 3.320239264786986,
"tokens_seen": 3006136320
},
{
"epoch": 0.05,
"learning_rate": 0.0009585115483319077,
"loss": 2.617,
"theoretical_loss": 3.3202281390332393,
"tokens_seen": 3006267392
},
{
"epoch": 0.05,
"learning_rate": 0.0009580838323353293,
"loss": 2.5027,
"theoretical_loss": 3.320217013900376,
"tokens_seen": 3006398464
},
{
"epoch": 0.05,
"learning_rate": 0.000957656116338751,
"loss": 2.6857,
"theoretical_loss": 3.3202058893883333,
"tokens_seen": 3006529536
},
{
"epoch": 0.05,
"learning_rate": 0.0009572284003421729,
"loss": 2.6411,
"theoretical_loss": 3.3201947654970505,
"tokens_seen": 3006660608
},
{
"epoch": 0.05,
"objective/train/docs_used": 1649212,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.7101857662200928,
"objective/train/theoretical_loss": 3.320183642226465,
"objective/train/tokens_used": 36842976,
"theoretical_loss": 3.320183642226465,
"tokens_seen": 3006791680
},
{
"epoch": 0.05,
"learning_rate": 0.0009568006843455946,
"loss": 2.5369,
"theoretical_loss": 3.320183642226465,
"tokens_seen": 3006791680
},
{
"epoch": 0.05,
"learning_rate": 0.0009563729683490164,
"loss": 2.5836,
"theoretical_loss": 3.3201725195765155,
"tokens_seen": 3006922752
},
{
"epoch": 0.05,
"learning_rate": 0.000955945252352438,
"loss": 2.5358,
"theoretical_loss": 3.3201613975471402,
"tokens_seen": 3007053824
},
{
"epoch": 0.05,
"learning_rate": 0.0009555175363558597,
"loss": 2.6035,
"theoretical_loss": 3.3201502761382775,
"tokens_seen": 3007184896
},
{
"epoch": 0.05,
"learning_rate": 0.0009550898203592815,
"loss": 2.4594,
"theoretical_loss": 3.320139155349866,
"tokens_seen": 3007315968
},
{
"epoch": 0.06,
"learning_rate": 0.0009546621043627032,
"loss": 2.6376,
"theoretical_loss": 3.3201280351818436,
"tokens_seen": 3007447040
},
{
"epoch": 0.06,
"learning_rate": 0.0009542343883661248,
"loss": 2.5904,
"theoretical_loss": 3.320116915634149,
"tokens_seen": 3007578112
},
{
"epoch": 0.06,
"learning_rate": 0.0009538066723695466,
"loss": 2.7616,
"theoretical_loss": 3.3201057967067205,
"tokens_seen": 3007709184
},
{
"epoch": 0.06,
"learning_rate": 0.0009533789563729683,
"loss": 2.6076,
"theoretical_loss": 3.3200946783994962,
"tokens_seen": 3007840256
},
{
"epoch": 0.06,
"learning_rate": 0.0009529512403763902,
"loss": 2.6121,
"theoretical_loss": 3.3200835607124146,
"tokens_seen": 3007971328
},
{
"epoch": 0.06,
"learning_rate": 0.0009525235243798119,
"loss": 2.6083,
"theoretical_loss": 3.3200724436454143,
"tokens_seen": 3008102400
},
{
"epoch": 0.06,
"learning_rate": 0.0009520958083832335,
"loss": 2.6831,
"theoretical_loss": 3.3200613271984336,
"tokens_seen": 3008233472
},
{
"epoch": 0.06,
"learning_rate": 0.0009516680923866553,
"loss": 2.6156,
"theoretical_loss": 3.3200502113714108,
"tokens_seen": 3008364544
},
{
"epoch": 0.06,
"objective/train/docs_used": 1649940,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.545426368713379,
"objective/train/theoretical_loss": 3.3200446536903643,
"objective/train/tokens_used": 38481376,
"theoretical_loss": 3.3200446536903643,
"tokens_seen": 3008430080
},
{
"epoch": 0.06,
"learning_rate": 0.000951240376390077,
"loss": 2.6849,
"theoretical_loss": 3.3200390961642845,
"tokens_seen": 3008495616
},
{
"epoch": 0.06,
"learning_rate": 0.0009508126603934988,
"loss": 2.654,
"theoretical_loss": 3.3200279815769926,
"tokens_seen": 3008626688
},
{
"epoch": 0.06,
"learning_rate": 0.0009503849443969204,
"loss": 2.6215,
"theoretical_loss": 3.3200168676094743,
"tokens_seen": 3008757760
},
{
"epoch": 0.06,
"learning_rate": 0.0009499572284003421,
"loss": 2.4387,
"theoretical_loss": 3.320005754261668,
"tokens_seen": 3008888832
},
{
"epoch": 0.06,
"learning_rate": 0.0009495295124037639,
"loss": 2.5621,
"theoretical_loss": 3.319994641533511,
"tokens_seen": 3009019904
},
{
"epoch": 0.06,
"learning_rate": 0.0009491017964071857,
"loss": 2.731,
"theoretical_loss": 3.319983529424943,
"tokens_seen": 3009150976
},
{
"epoch": 0.06,
"learning_rate": 0.0009486740804106075,
"loss": 2.5847,
"theoretical_loss": 3.3199724179359027,
"tokens_seen": 3009282048
},
{
"epoch": 0.06,
"learning_rate": 0.0009482463644140291,
"loss": 2.5836,
"theoretical_loss": 3.319961307066327,
"tokens_seen": 3009413120
},
{
"epoch": 0.06,
"learning_rate": 0.0009478186484174508,
"loss": 2.6488,
"theoretical_loss": 3.3199501968161558,
"tokens_seen": 3009544192
},
{
"epoch": 0.06,
"learning_rate": 0.0009473909324208726,
"loss": 2.6244,
"theoretical_loss": 3.319939087185327,
"tokens_seen": 3009675264
},
{
"epoch": 0.06,
"learning_rate": 0.0009469632164242943,
"loss": 2.6873,
"theoretical_loss": 3.3199279781737796,
"tokens_seen": 3009806336
},
{
"epoch": 0.06,
"learning_rate": 0.000946535500427716,
"loss": 2.5921,
"theoretical_loss": 3.3199168697814514,
"tokens_seen": 3009937408
},
{
"epoch": 0.06,
"objective/train/docs_used": 1651249,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.582075834274292,
"objective/train/theoretical_loss": 3.3199057620082812,
"objective/train/tokens_used": 40119776,
"theoretical_loss": 3.3199057620082812,
"tokens_seen": 3010068480
},
{
"epoch": 0.06,
"learning_rate": 0.0009461077844311377,
"loss": 2.5527,
"theoretical_loss": 3.3199057620082812,
"tokens_seen": 3010068480
},
{
"epoch": 0.06,
"learning_rate": 0.0009456800684345594,
"loss": 2.7818,
"theoretical_loss": 3.319894654854208,
"tokens_seen": 3010199552
},
{
"epoch": 0.06,
"learning_rate": 0.0009452523524379812,
"loss": 2.628,
"theoretical_loss": 3.3198835483191695,
"tokens_seen": 3010330624
},
{
"epoch": 0.06,
"learning_rate": 0.000944824636441403,
"loss": 2.4649,
"theoretical_loss": 3.319872442403105,
"tokens_seen": 3010461696
},
{
"epoch": 0.07,
"learning_rate": 0.0009443969204448247,
"loss": 2.7571,
"theoretical_loss": 3.3198613371059524,
"tokens_seen": 3010592768
},
{
"epoch": 0.07,
"learning_rate": 0.0009439692044482464,
"loss": 2.5878,
"theoretical_loss": 3.319850232427651,
"tokens_seen": 3010723840
},
{
"epoch": 0.07,
"learning_rate": 0.0009435414884516681,
"loss": 2.6015,
"theoretical_loss": 3.3198391283681383,
"tokens_seen": 3010854912
},
{
"epoch": 0.07,
"learning_rate": 0.0009431137724550899,
"loss": 2.5823,
"theoretical_loss": 3.3198280249273546,
"tokens_seen": 3010985984
},
{
"epoch": 0.07,
"learning_rate": 0.0009426860564585116,
"loss": 2.7222,
"theoretical_loss": 3.319816922105237,
"tokens_seen": 3011117056
},
{
"epoch": 0.07,
"learning_rate": 0.0009422583404619332,
"loss": 2.6364,
"theoretical_loss": 3.319805819901724,
"tokens_seen": 3011248128
},
{
"epoch": 0.07,
"learning_rate": 0.000941830624465355,
"loss": 2.6811,
"theoretical_loss": 3.3197947183167553,
"tokens_seen": 3011379200
},
{
"epoch": 0.07,
"learning_rate": 0.0009414029084687767,
"loss": 2.8016,
"theoretical_loss": 3.319783617350269,
"tokens_seen": 3011510272
},
{
"epoch": 0.07,
"learning_rate": 0.0009409751924721985,
"loss": 2.5657,
"theoretical_loss": 3.319772517002204,
"tokens_seen": 3011641344
},
{
"epoch": 0.07,
"objective/train/docs_used": 1651905,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 3.0052103996276855,
"objective/train/theoretical_loss": 3.31976696706006,
"objective/train/tokens_used": 41758176,
"theoretical_loss": 3.31976696706006,
"tokens_seen": 3011706880
},
{
"epoch": 0.07,
"learning_rate": 0.0009405474764756203,
"loss": 2.6357,
"theoretical_loss": 3.319761417272498,
"tokens_seen": 3011772416
},
{
"epoch": 0.07,
"learning_rate": 0.0009401197604790419,
"loss": 2.5734,
"theoretical_loss": 3.319750318161091,
"tokens_seen": 3011903488
},
{
"epoch": 0.07,
"learning_rate": 0.0009396920444824637,
"loss": 2.424,
"theoretical_loss": 3.3197392196679205,
"tokens_seen": 3012034560
},
{
"epoch": 0.07,
"learning_rate": 0.0009392643284858854,
"loss": 2.5344,
"theoretical_loss": 3.3197281217929255,
"tokens_seen": 3012165632
},
{
"epoch": 0.07,
"learning_rate": 0.0009388366124893071,
"loss": 2.5689,
"theoretical_loss": 3.319717024536045,
"tokens_seen": 3012296704
},
{
"epoch": 0.07,
"learning_rate": 0.0009384088964927289,
"loss": 2.4989,
"theoretical_loss": 3.3197059278972176,
"tokens_seen": 3012427776
},
{
"epoch": 0.07,
"learning_rate": 0.0009379811804961505,
"loss": 2.6272,
"theoretical_loss": 3.3196948318763817,
"tokens_seen": 3012558848
},
{
"epoch": 0.07,
"learning_rate": 0.0009375534644995723,
"loss": 2.5959,
"theoretical_loss": 3.319683736473476,
"tokens_seen": 3012689920
},
{
"epoch": 0.07,
"learning_rate": 0.000937125748502994,
"loss": 2.5228,
"theoretical_loss": 3.3196726416884395,
"tokens_seen": 3012820992
},
{
"epoch": 0.07,
"learning_rate": 0.0009366980325064158,
"loss": 2.7357,
"theoretical_loss": 3.3196615475212106,
"tokens_seen": 3012952064
},
{
"epoch": 0.07,
"learning_rate": 0.0009362703165098376,
"loss": 2.6946,
"theoretical_loss": 3.3196504539717284,
"tokens_seen": 3013083136
},
{
"epoch": 0.07,
"learning_rate": 0.0009358426005132592,
"loss": 2.7344,
"theoretical_loss": 3.3196393610399317,
"tokens_seen": 3013214208
},
{
"epoch": 0.07,
"objective/train/docs_used": 1652881,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.5316507816314697,
"objective/train/theoretical_loss": 3.3196282687257583,
"objective/train/tokens_used": 43396576,
"theoretical_loss": 3.3196282687257583,
"tokens_seen": 3013345280
},
{
"epoch": 0.07,
"learning_rate": 0.000935414884516681,
"loss": 2.7259,
"theoretical_loss": 3.3196282687257583,
"tokens_seen": 3013345280
},
{
"epoch": 0.07,
"learning_rate": 0.0009349871685201027,
"loss": 2.4882,
"theoretical_loss": 3.3196171770291483,
"tokens_seen": 3013476352
},
{
"epoch": 0.07,
"learning_rate": 0.0009345594525235244,
"loss": 2.6169,
"theoretical_loss": 3.3196060859500394,
"tokens_seen": 3013607424
},
{
"epoch": 0.08,
"learning_rate": 0.0009341317365269461,
"loss": 2.5293,
"theoretical_loss": 3.319594995488371,
"tokens_seen": 3013738496
},
{
"epoch": 0.08,
"learning_rate": 0.0009337040205303678,
"loss": 2.7782,
"theoretical_loss": 3.3195839056440812,
"tokens_seen": 3013869568
},
{
"epoch": 0.08,
"learning_rate": 0.0009332763045337895,
"loss": 2.6719,
"theoretical_loss": 3.3195728164171094,
"tokens_seen": 3014000640
},
{
"epoch": 0.08,
"learning_rate": 0.0009328485885372114,
"loss": 2.7268,
"theoretical_loss": 3.319561727807394,
"tokens_seen": 3014131712
},
{
"epoch": 0.08,
"learning_rate": 0.0009324208725406331,
"loss": 2.5997,
"theoretical_loss": 3.3195506398148744,
"tokens_seen": 3014262784
},
{
"epoch": 0.08,
"learning_rate": 0.0009319931565440548,
"loss": 2.7602,
"theoretical_loss": 3.319539552439489,
"tokens_seen": 3014393856
},
{
"epoch": 0.08,
"learning_rate": 0.0009315654405474765,
"loss": 2.5845,
"theoretical_loss": 3.3195284656811763,
"tokens_seen": 3014524928
},
{
"epoch": 0.08,
"learning_rate": 0.0009311377245508982,
"loss": 2.7053,
"theoretical_loss": 3.319517379539876,
"tokens_seen": 3014656000
},
{
"epoch": 0.08,
"learning_rate": 0.00093071000855432,
"loss": 2.6045,
"theoretical_loss": 3.3195062940155258,
"tokens_seen": 3014787072
},
{
"epoch": 0.08,
"learning_rate": 0.0009302822925577416,
"loss": 2.6324,
"theoretical_loss": 3.3194952091080654,
"tokens_seen": 3014918144
},
{
"epoch": 0.08,
"objective/train/docs_used": 1653310,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.4821465015411377,
"objective/train/theoretical_loss": 3.3194896668856497,
"objective/train/tokens_used": 45034976,
"theoretical_loss": 3.3194896668856497,
"tokens_seen": 3014983680
},
{
"epoch": 0.08,
"learning_rate": 0.0009298545765611634,
"loss": 2.532,
"theoretical_loss": 3.3194841248174334,
"tokens_seen": 3015049216
},
{
"epoch": 0.08,
"learning_rate": 0.0009294268605645851,
"loss": 2.544,
"theoretical_loss": 3.3194730411435684,
"tokens_seen": 3015180288
},
{
"epoch": 0.08,
"learning_rate": 0.0009289991445680068,
"loss": 2.826,
"theoretical_loss": 3.3194619580864098,
"tokens_seen": 3015311360
},
{
"epoch": 0.08,
"learning_rate": 0.0009285714285714287,
"loss": 2.7561,
"theoretical_loss": 3.3194508756458965,
"tokens_seen": 3015442432
},
{
"epoch": 0.08,
"learning_rate": 0.0009281437125748503,
"loss": 2.5692,
"theoretical_loss": 3.319439793821967,
"tokens_seen": 3015573504
},
{
"epoch": 0.08,
"learning_rate": 0.000927715996578272,
"loss": 2.6322,
"theoretical_loss": 3.3194287126145596,
"tokens_seen": 3015704576
},
{
"epoch": 0.08,
"learning_rate": 0.0009272882805816938,
"loss": 2.6346,
"theoretical_loss": 3.3194176320236144,
"tokens_seen": 3015835648
},
{
"epoch": 0.08,
"learning_rate": 0.0009268605645851155,
"loss": 2.7908,
"theoretical_loss": 3.31940655204907,
"tokens_seen": 3015966720
},
{
"epoch": 0.08,
"learning_rate": 0.0009264328485885373,
"loss": 2.6439,
"theoretical_loss": 3.319395472690865,
"tokens_seen": 3016097792
},
{
"epoch": 0.08,
"learning_rate": 0.0009260051325919589,
"loss": 2.5589,
"theoretical_loss": 3.3193843939489382,
"tokens_seen": 3016228864
},
{
"epoch": 0.08,
"learning_rate": 0.0009255774165953806,
"loss": 2.6926,
"theoretical_loss": 3.319373315823229,
"tokens_seen": 3016359936
},
{
"epoch": 0.08,
"learning_rate": 0.0009251497005988024,
"loss": 2.7646,
"theoretical_loss": 3.3193622383136763,
"tokens_seen": 3016491008
},
{
"epoch": 0.08,
"objective/train/docs_used": 1654644,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3390886783599854,
"objective/train/theoretical_loss": 3.3193511614202187,
"objective/train/tokens_used": 46673376,
"theoretical_loss": 3.3193511614202187,
"tokens_seen": 3016622080
},
{
"epoch": 0.08,
"learning_rate": 0.0009247219846022242,
"loss": 2.5287,
"theoretical_loss": 3.3193511614202187,
"tokens_seen": 3016622080
},
{
"epoch": 0.09,
"learning_rate": 0.000924294268605646,
"loss": 2.7607,
"theoretical_loss": 3.319340085142796,
"tokens_seen": 3016753152
},
{
"epoch": 0.09,
"learning_rate": 0.0009238665526090676,
"loss": 2.6289,
"theoretical_loss": 3.319329009481346,
"tokens_seen": 3016884224
},
{
"epoch": 0.09,
"learning_rate": 0.0009234388366124893,
"loss": 2.7648,
"theoretical_loss": 3.3193179344358086,
"tokens_seen": 3017015296
},
{
"epoch": 0.09,
"learning_rate": 0.0009230111206159111,
"loss": 2.497,
"theoretical_loss": 3.319306860006122,
"tokens_seen": 3017146368
},
{
"epoch": 0.09,
"learning_rate": 0.0009225834046193328,
"loss": 2.4963,
"theoretical_loss": 3.319295786192226,
"tokens_seen": 3017277440
},
{
"epoch": 0.09,
"learning_rate": 0.0009221556886227545,
"loss": 2.5823,
"theoretical_loss": 3.319284712994059,
"tokens_seen": 3017408512
},
{
"epoch": 0.09,
"learning_rate": 0.0009217279726261762,
"loss": 2.6555,
"theoretical_loss": 3.3192736404115606,
"tokens_seen": 3017539584
},
{
"epoch": 0.09,
"learning_rate": 0.0009213002566295979,
"loss": 2.5264,
"theoretical_loss": 3.3192625684446693,
"tokens_seen": 3017670656
},
{
"epoch": 0.09,
"learning_rate": 0.0009208725406330197,
"loss": 2.397,
"theoretical_loss": 3.3192514970933242,
"tokens_seen": 3017801728
},
{
"epoch": 0.09,
"learning_rate": 0.0009204448246364415,
"loss": 2.6273,
"theoretical_loss": 3.319240426357465,
"tokens_seen": 3017932800
},
{
"epoch": 0.09,
"learning_rate": 0.0009200171086398631,
"loss": 2.6414,
"theoretical_loss": 3.31922935623703,
"tokens_seen": 3018063872
},
{
"epoch": 0.09,
"learning_rate": 0.0009195893926432849,
"loss": 2.5706,
"theoretical_loss": 3.3192182867319584,
"tokens_seen": 3018194944
},
{
"epoch": 0.09,
"objective/train/docs_used": 1655335,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.3051180839538574,
"objective/train/theoretical_loss": 3.319212752210165,
"objective/train/tokens_used": 48311776,
"theoretical_loss": 3.319212752210165,
"tokens_seen": 3018260480
},
{
"epoch": 0.09,
"learning_rate": 0.0009191616766467066,
"loss": 2.5393,
"theoretical_loss": 3.3192072178421896,
"tokens_seen": 3018326016
},
{
"epoch": 0.09,
"learning_rate": 0.0009187339606501284,
"loss": 2.6545,
"theoretical_loss": 3.319196149567662,
"tokens_seen": 3018457088
},
{
"epoch": 0.09,
"learning_rate": 0.00091830624465355,
"loss": 2.5623,
"theoretical_loss": 3.3191850819083157,
"tokens_seen": 3018588160
},
{
"epoch": 0.09,
"learning_rate": 0.0009178785286569717,
"loss": 2.6804,
"theoretical_loss": 3.319174014864089,
"tokens_seen": 3018719232
},
{
"epoch": 0.09,
"learning_rate": 0.0009174508126603935,
"loss": 2.8051,
"theoretical_loss": 3.319162948434921,
"tokens_seen": 3018850304
},
{
"epoch": 0.09,
"learning_rate": 0.0009170230966638152,
"loss": 2.7044,
"theoretical_loss": 3.319151882620752,
"tokens_seen": 3018981376
},
{
"epoch": 0.09,
"learning_rate": 0.0009165953806672371,
"loss": 2.5421,
"theoretical_loss": 3.3191408174215193,
"tokens_seen": 3019112448
},
{
"epoch": 0.09,
"learning_rate": 0.0009161676646706587,
"loss": 2.7474,
"theoretical_loss": 3.3191297528371635,
"tokens_seen": 3019243520
},
{
"epoch": 0.09,
"learning_rate": 0.0009157399486740804,
"loss": 2.5804,
"theoretical_loss": 3.319118688867623,
"tokens_seen": 3019374592
},
{
"epoch": 0.09,
"learning_rate": 0.0009153122326775022,
"loss": 2.5145,
"theoretical_loss": 3.319107625512837,
"tokens_seen": 3019505664
},
{
"epoch": 0.09,
"learning_rate": 0.0009148845166809239,
"loss": 2.746,
"theoretical_loss": 3.3190965627727445,
"tokens_seen": 3019636736
},
{
"epoch": 0.09,
"learning_rate": 0.0009144568006843457,
"loss": 2.5949,
"theoretical_loss": 3.3190855006472857,
"tokens_seen": 3019767808
},
{
"epoch": 0.09,
"objective/train/docs_used": 1656670,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.1859257221221924,
"objective/train/theoretical_loss": 3.3190744391363984,
"objective/train/tokens_used": 49950176,
"theoretical_loss": 3.3190744391363984,
"tokens_seen": 3019898880
},
{
"epoch": 0.1,
"learning_rate": 0.0009140290846877673,
"loss": 2.6334,
"theoretical_loss": 3.3190744391363984,
"tokens_seen": 3019898880
},
{
"epoch": 0.1,
"learning_rate": 0.000913601368691189,
"loss": 2.6976,
"theoretical_loss": 3.3190633782400223,
"tokens_seen": 3020029952
},
{
"epoch": 0.1,
"learning_rate": 0.0009131736526946108,
"loss": 2.5658,
"theoretical_loss": 3.3190523179580973,
"tokens_seen": 3020161024
},
{
"epoch": 0.1,
"learning_rate": 0.0009127459366980325,
"loss": 2.7495,
"theoretical_loss": 3.3190412582905617,
"tokens_seen": 3020292096
},
{
"epoch": 0.1,
"learning_rate": 0.0009123182207014543,
"loss": 2.7093,
"theoretical_loss": 3.319030199237355,
"tokens_seen": 3020423168
},
{
"epoch": 0.1,
"learning_rate": 0.000911890504704876,
"loss": 2.5295,
"theoretical_loss": 3.3190191407984164,
"tokens_seen": 3020554240
},
{
"epoch": 0.1,
"learning_rate": 0.0009114627887082977,
"loss": 2.6081,
"theoretical_loss": 3.3190080829736854,
"tokens_seen": 3020685312
},
{
"epoch": 0.1,
"learning_rate": 0.0009110350727117195,
"loss": 2.6166,
"theoretical_loss": 3.318997025763101,
"tokens_seen": 3020816384
},
{
"epoch": 0.1,
"learning_rate": 0.0009106073567151412,
"loss": 2.7691,
"theoretical_loss": 3.318985969166602,
"tokens_seen": 3020947456
},
{
"epoch": 0.1,
"learning_rate": 0.0009101796407185628,
"loss": 2.6465,
"theoretical_loss": 3.3189749131841286,
"tokens_seen": 3021078528
},
{
"epoch": 0.1,
"learning_rate": 0.0009097519247219846,
"loss": 2.5314,
"theoretical_loss": 3.3189638578156195,
"tokens_seen": 3021209600
},
{
"epoch": 0.1,
"learning_rate": 0.0009093242087254063,
"loss": 2.8726,
"theoretical_loss": 3.3189528030610136,
"tokens_seen": 3021340672
},
{
"epoch": 0.1,
"learning_rate": 0.0009088964927288281,
"loss": 2.6762,
"theoretical_loss": 3.318941748920251,
"tokens_seen": 3021471744
},
{
"epoch": 0.1,
"objective/train/docs_used": 1657192,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.9018635749816895,
"objective/train/theoretical_loss": 3.318936222080042,
"objective/train/tokens_used": 51588576,
"theoretical_loss": 3.318936222080042,
"tokens_seen": 3021537280
},
{
"epoch": 0.1,
"learning_rate": 0.0009084687767322499,
"loss": 2.643,
"theoretical_loss": 3.318930695393271,
"tokens_seen": 3021602816
},
{
"epoch": 0.1,
"learning_rate": 0.0009080410607356715,
"loss": 2.6665,
"theoretical_loss": 3.3189196424800116,
"tokens_seen": 3021733888
},
{
"epoch": 0.1,
"learning_rate": 0.0009076133447390933,
"loss": 2.6265,
"theoretical_loss": 3.3189085901804134,
"tokens_seen": 3021864960
},
{
"epoch": 0.1,
"learning_rate": 0.000907185628742515,
"loss": 2.5788,
"theoretical_loss": 3.3188975384944155,
"tokens_seen": 3021996032
},
{
"epoch": 0.1,
"learning_rate": 0.0009067579127459367,
"loss": 2.6531,
"theoretical_loss": 3.318886487421957,
"tokens_seen": 3022127104
},
{
"epoch": 0.1,
"learning_rate": 0.0009063301967493585,
"loss": 2.7177,
"theoretical_loss": 3.318875436962977,
"tokens_seen": 3022258176
},
{
"epoch": 0.1,
"learning_rate": 0.0009059024807527801,
"loss": 2.4989,
"theoretical_loss": 3.3188643871174155,
"tokens_seen": 3022389248
},
{
"epoch": 0.1,
"learning_rate": 0.0009054747647562019,
"loss": 2.5249,
"theoretical_loss": 3.318853337885211,
"tokens_seen": 3022520320
},
{
"epoch": 0.1,
"learning_rate": 0.0009050470487596236,
"loss": 2.6031,
"theoretical_loss": 3.318842289266304,
"tokens_seen": 3022651392
},
{
"epoch": 0.1,
"learning_rate": 0.0009046193327630453,
"loss": 2.5702,
"theoretical_loss": 3.3188312412606327,
"tokens_seen": 3022782464
},
{
"epoch": 0.1,
"learning_rate": 0.0009041916167664672,
"loss": 2.5348,
"theoretical_loss": 3.3188201938681368,
"tokens_seen": 3022913536
},
{
"epoch": 0.11,
"learning_rate": 0.0009037639007698888,
"loss": 2.5876,
"theoretical_loss": 3.318809147088756,
"tokens_seen": 3023044608
},
{
"epoch": 0.11,
"objective/train/docs_used": 1658380,
"objective/train/instantaneous_batch_size": 16,
"objective/train/instantaneous_microbatch_size": 16384,
"objective/train/original_loss": 2.8608322143554688,
"objective/train/theoretical_loss": 3.3187981009224297,
"objective/train/tokens_used": 53226976,
"theoretical_loss": 3.3187981009224297,
"tokens_seen": 3023175680
},
{
"epoch": 0.11,
"learning_rate": 0.0009033361847733106,
"loss": 2.6644,
"theoretical_loss": 3.3187981009224297,
"tokens_seen": 3023175680
},
{
"epoch": 0.11,
"learning_rate": 0.0009029084687767323,
"loss": 2.5824,
"theoretical_loss": 3.3187870553690972,
"tokens_seen": 3023306752
}
],
"max_steps": 2362,
"num_train_epochs": 9223372036854775807,
"total_flos": 1.6789580808192e+16,
"trial_name": null,
"trial_params": null
}
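
Below is a minimal sketch of how the `log_history` records above could be loaded and summarized. It assumes only the standard library and a local copy of this file saved as `trainer_state.json` (the path is an assumption); the field names `loss`, `theoretical_loss`, and `tokens_seen` are taken directly from the records in this file.

```python
import json

# Load the checkpoint's trainer state (local filename is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step records that carry a training loss;
# the "objective/train/*" diagnostic records are skipped.
steps = [rec for rec in state["log_history"] if "loss" in rec]

print(f"logged steps:           {len(steps)}")
print(f"tokens seen:            {steps[0]['tokens_seen']} -> {steps[-1]['tokens_seen']}")
print(f"first / last loss:      {steps[0]['loss']} / {steps[-1]['loss']}")
print(f"final theoretical_loss: {steps[-1]['theoretical_loss']}")
```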