{"step": 310, "epoch": 0.1262600549842175, "loss": 1.7454, "learning_rate": 0.00019519090976794406, "train_runtime": null}
{"step": 320, "epoch": 0.1303329599837084, "loss": 1.754, "learning_rate": 0.00019477838400112254, "train_runtime": null}
{"step": 330, "epoch": 0.13440586498319926, "loss": 1.6665, "learning_rate": 0.00019434935823394746, "train_runtime": null}
{"step": 340, "epoch": 0.13847876998269015, "loss": 1.6773, "learning_rate": 0.00019390390715565725, "train_runtime": null}
{"step": 350, "epoch": 0.14255167498218105, "loss": 1.6328, "learning_rate": 0.000193442108314978, "train_runtime": null}
{"step": 360, "epoch": 0.14662457998167192, "loss": 1.5659, "learning_rate": 0.00019296404210662331, "train_runtime": null}
{"step": 370, "epoch": 0.1506974849811628, "loss": 1.7083, "learning_rate": 0.00019246979175729822, "train_runtime": null}
{"step": 380, "epoch": 0.1547703899806537, "loss": 1.6854, "learning_rate": 0.00019195944331121015, "train_runtime": null}
{"step": 390, "epoch": 0.1588432949801446, "loss": 1.7282, "learning_rate": 0.0001914330856150897, "train_runtime": null}
{"step": 400, "epoch": 0.16291619997963547, "loss": 1.5562, "learning_rate": 0.00019089081030272296, "train_runtime": null}
{"step": 410, "epoch": 0.16698910497912636, "loss": 1.6452, "learning_rate": 0.00019033271177899922, "train_runtime": null}
{"step": 420, "epoch": 0.17106200997861726, "loss": 1.626, "learning_rate": 0.0001897588872034758, "train_runtime": null}
{"step": 430, "epoch": 0.17513491497810812, "loss": 1.7451, "learning_rate": 0.00018916943647346375, "train_runtime": null}
{"step": 440, "epoch": 0.17920781997759902, "loss": 1.846, "learning_rate": 0.0001885644622066364, "train_runtime": null}
{"step": 450, "epoch": 0.18328072497708991, "loss": 1.671, "learning_rate": 0.00018794406972316482, "train_runtime": null}
{"step": 460, "epoch": 0.1873536299765808, "loss": 1.4983, "learning_rate": 0.00018730836702738257, "train_runtime": null}
{"step": 470, "epoch": 0.19142653497607168, "loss": 1.6776, "learning_rate": 0.0001866574647889831, "train_runtime": null}
{"step": 480, "epoch": 0.19549943997556257, "loss": 1.802, "learning_rate": 0.00018599147632375332, "train_runtime": null}
{"step": 490, "epoch": 0.19957234497505347, "loss": 1.7161, "learning_rate": 0.00018531051757384633, "train_runtime": null}
{"step": 500, "epoch": 0.20364524997454433, "loss": 1.7042, "learning_rate": 0.00018461470708759712, "train_runtime": null}
{"step": 510, "epoch": 0.20771815497403523, "loss": 1.689, "learning_rate": 0.00018390416599888435, "train_runtime": null}
{"step": 520, "epoch": 0.21179105997352612, "loss": 1.5619, "learning_rate": 0.0001831790180060422, "train_runtime": null}
{"step": 530, "epoch": 0.215863964973017, "loss": 1.6877, "learning_rate": 0.00018243938935032561, "train_runtime": null}
{"step": 540, "epoch": 0.2199368699725079, "loss": 1.7831, "learning_rate": 0.00018168540879393296, "train_runtime": null}
{"step": 550, "epoch": 0.22400977497199878, "loss": 1.7288, "learning_rate": 0.0001809172075975897, "train_runtime": null}
{"step": 560, "epoch": 0.22808267997148968, "loss": 1.7188, "learning_rate": 0.00018013491949769734, "train_runtime": null}
{"step": 570, "epoch": 0.23215558497098054, "loss": 1.6244, "learning_rate": 0.00017933868068305104, "train_runtime": null}
{"step": 580, "epoch": 0.23622848997047144, "loss": 1.5565, "learning_rate": 0.0001785286297711305, "train_runtime": null}
{"step": 590, "epoch": 0.24030139496996233, "loss": 1.6532, "learning_rate": 0.00017770490778396808, "train_runtime": null}
{"step": 600, "epoch": 0.2443742999694532, "loss": 1.7585, "learning_rate": 0.00017686765812359808, "train_runtime": null}
{"step": 610, "epoch": 0.2484472049689441, "loss": 1.6401, "learning_rate": 0.0001760170265470921, "train_runtime": null}
{"step": 620, "epoch": 0.252520109968435, "loss": 1.6915, "learning_rate": 0.00017515316114118375, "train_runtime": null}
{"step": 630, "epoch": 0.25659301496792586, "loss": 1.5666, "learning_rate": 0.00017427621229648853, "train_runtime": null}
{"step": 640, "epoch": 0.2606659199674168, "loss": 1.5926, "learning_rate": 0.00017338633268132212, "train_runtime": null}
{"step": 650, "epoch": 0.26473882496690765, "loss": 1.5925, "learning_rate": 0.0001724836772151223, "train_runtime": null}
{"step": 660, "epoch": 0.2688117299663985, "loss": 1.6237, "learning_rate": 0.00017156840304147902, "train_runtime": null}
{"step": 670, "epoch": 0.27288463496588944, "loss": 1.7356, "learning_rate": 0.00017064066950077722, "train_runtime": null}
{"step": 680, "epoch": 0.2769575399653803, "loss": 1.5585, "learning_rate": 0.00016970063810245716, "train_runtime": null}
{"step": 690, "epoch": 0.2810304449648712, "loss": 1.5913, "learning_rate": 0.00016874847249689722, "train_runtime": null}
{"step": 700, "epoch": 0.2851033499643621, "loss": 1.6791, "learning_rate": 0.00016778433844692397, "train_runtime": null}
{"step": 710, "epoch": 0.28917625496385296, "loss": 1.5153, "learning_rate": 0.0001668084037989544, "train_runtime": null}
{"step": 720, "epoch": 0.29324915996334383, "loss": 1.6821, "learning_rate": 0.00016582083845377552, "train_runtime": null}
{"step": 730, "epoch": 0.29732206496283475, "loss": 1.8462, "learning_rate": 0.00016482181433696643, "train_runtime": null}
{"step": 740, "epoch": 0.3013949699623256, "loss": 1.5756, "learning_rate": 0.00016381150536896736, "train_runtime": null}
{"step": 750, "epoch": 0.3054678749618165, "loss": 1.6003, "learning_rate": 0.0001627900874348022, "train_runtime": null}
{"step": 760, "epoch": 0.3095407799613074, "loss": 1.6576, "learning_rate": 0.0001617577383534584, "train_runtime": null}
{"step": 770, "epoch": 0.3136136849607983, "loss": 1.6181, "learning_rate": 0.00016071463784693045, "train_runtime": null}
{"step": 780, "epoch": 0.3176865899602892, "loss": 1.5142, "learning_rate": 0.00015966096750893197, "train_runtime": null}
{"step": 790, "epoch": 0.32175949495978007, "loss": 1.6583, "learning_rate": 0.00015859691077328215, "train_runtime": null}
{"step": 800, "epoch": 0.32583239995927094, "loss": 1.6468, "learning_rate": 0.00015752265288197155, "train_runtime": null}
{"step": 810, "epoch": 0.32990530495876186, "loss": 1.8431, "learning_rate": 0.00015643838085291323, "train_runtime": null}
{"step": 820, "epoch": 0.3339782099582527, "loss": 1.7042, "learning_rate": 0.00015534428344738505, "train_runtime": null}
{"step": 830, "epoch": 0.3380511149577436, "loss": 1.5479, "learning_rate": 0.00015424055113716763, "train_runtime": null}
{"step": 840, "epoch": 0.3421240199572345, "loss": 1.5303, "learning_rate": 0.0001531273760713855, "train_runtime": null}
{"step": 850, "epoch": 0.3461969249567254, "loss": 1.5586, "learning_rate": 0.00015200495204305574, "train_runtime": null}
{"step": 860, "epoch": 0.35026982995621625, "loss": 1.8219, "learning_rate": 0.00015087347445535013, "train_runtime": null}
{"step": 870, "epoch": 0.3543427349557072, "loss": 1.7261, "learning_rate": 0.00014973314028757787, "train_runtime": null}
{"step": 880, "epoch": 0.35841563995519804, "loss": 1.6982, "learning_rate": 0.00014858414806089295, "train_runtime": null}
{"step": 890, "epoch": 0.3624885449546889, "loss": 1.5318, "learning_rate": 0.0001474266978037338, "train_runtime": null}
{"step": 900, "epoch": 0.36656144995417983, "loss": 1.7901, "learning_rate": 0.00014626099101700018, "train_runtime": null}
{"step": 910, "epoch": 0.3706343549536707, "loss": 1.495, "learning_rate": 0.00014508723063897376, "train_runtime": null}
{"step": 910, "epoch": 0.3706343549536707, "loss": 1.4936, "learning_rate": 0.00014508723063897376, "train_runtime": null}
{"step": 920, "epoch": 0.3747072599531616, "loss": 1.5804, "learning_rate": 0.00014390562100998868, "train_runtime": null}
{"step": 930, "epoch": 0.3787801649526525, "loss": 1.6731, "learning_rate": 0.00014271636783685777, "train_runtime": null}
{"step": 940, "epoch": 0.38285306995214335, "loss": 1.7237, "learning_rate": 0.00014151967815706091, "train_runtime": null}
{"step": 950, "epoch": 0.3869259749516343, "loss": 1.5355, "learning_rate": 0.00014031576030270202, "train_runtime": null}
{"step": 960, "epoch": 0.39099887995112514, "loss": 1.7247, "learning_rate": 0.00013910482386424023, "train_runtime": null}
{"step": 970, "epoch": 0.395071784950616, "loss": 1.6796, "learning_rate": 0.00013788707965400236, "train_runtime": null}
{"step": 980, "epoch": 0.39914468995010693, "loss": 1.5955, "learning_rate": 0.00013666273966948252, "train_runtime": null}
{"step": 990, "epoch": 0.4032175949495978, "loss": 1.647, "learning_rate": 0.00013543201705643526, "train_runtime": null}
{"step": 1000, "epoch": 0.40729049994908867, "loss": 1.7161, "learning_rate": 0.00013419512607176914, "train_runtime": null}
{"step": 1010, "epoch": 0.4113634049485796, "loss": 1.544, "learning_rate": 0.00013295228204624648, "train_runtime": null}
{"step": 1020, "epoch": 0.41543630994807046, "loss": 1.6287, "learning_rate": 0.00013170370134699653, "train_runtime": null}
{"step": 1030, "epoch": 0.4195092149475613, "loss": 1.6858, "learning_rate": 0.00013044960133984804, "train_runtime": null}
{"step": 1040, "epoch": 0.42358211994705225, "loss": 1.7392, "learning_rate": 0.00012919020035148776, "train_runtime": null}
{"step": 1050, "epoch": 0.4276550249465431, "loss": 1.5007, "learning_rate": 0.0001279257176314521, "train_runtime": null}
{"step": 1060, "epoch": 0.431727929946034, "loss": 1.487, "learning_rate": 0.00012665637331395785, "train_runtime": null}
{"step": 1070, "epoch": 0.4358008349455249, "loss": 1.4913, "learning_rate": 0.00012538238837957882, "train_runtime": null}
{"step": 1080, "epoch": 0.4398737399450158, "loss": 1.6263, "learning_rate": 0.00012410398461677554, "train_runtime": null}
{"step": 1090, "epoch": 0.4439466449445067, "loss": 1.7378, "learning_rate": 0.00012282138458328358, "train_runtime": null}
{"step": 1100, "epoch": 0.44801954994399756, "loss": 1.7385, "learning_rate": 0.00012153481156736892, "train_runtime": null}
{"step": 1110, "epoch": 0.45209245494348843, "loss": 1.5212, "learning_rate": 0.00012024448954895522, "train_runtime": null}
{"step": 1120, "epoch": 0.45616535994297935, "loss": 1.5254, "learning_rate": 0.00011895064316063127, "train_runtime": null}
{"step": 1130, "epoch": 0.4602382649424702, "loss": 1.5704, "learning_rate": 0.00011765349764854461, "train_runtime": null}
{"step": 1140, "epoch": 0.4643111699419611, "loss": 1.5893, "learning_rate": 0.00011635327883318831, "train_runtime": null}
{"step": 1150, "epoch": 0.468384074941452, "loss": 1.6388, "learning_rate": 0.00011505021307008785, "train_runtime": null}
{"step": 1160, "epoch": 0.4724569799409429, "loss": 1.7192, "learning_rate": 0.00011374452721039477, "train_runtime": null}
{"step": 1170, "epoch": 0.47652988494043375, "loss": 1.6048, "learning_rate": 0.00011243644856139403, "train_runtime": null}
{"step": 1180, "epoch": 0.48060278993992467, "loss": 1.6785, "learning_rate": 0.00011112620484693223, "train_runtime": null}
{"step": 1190, "epoch": 0.48467569493941554, "loss": 1.5799, "learning_rate": 0.0001098140241677728, "train_runtime": null}
{"step": 1200, "epoch": 0.4887485999389064, "loss": 1.5966, "learning_rate": 0.00010850013496188606, "train_runtime": null}
{"step": 1210, "epoch": 0.4928215049383973, "loss": 1.7161, "learning_rate": 0.00010718476596468028, "train_runtime": null}
{"step": 1220, "epoch": 0.4968944099378882, "loss": 1.6991, "learning_rate": 0.00010586814616918113, "train_runtime": null}
{"step": 1230, "epoch": 0.5009673149373791, "loss": 1.7114, "learning_rate": 0.00010455050478616617, "train_runtime": null}
{"step": 1240, "epoch": 0.50504021993687, "loss": 1.8174, "learning_rate": 0.00010323207120426142, "train_runtime": null}
{"step": 1250, "epoch": 0.5091131249363609, "loss": 1.799, "learning_rate": 0.00010191307495000712, "train_runtime": null}
{"step": 1260, "epoch": 0.5131860299358517, "loss": 1.4763, "learning_rate": 0.00010059374564789932, "train_runtime": null}
{"step": 1270, "epoch": 0.5172589349353426, "loss": 1.5262, "learning_rate": 9.927431298041441e-05, "train_runtime": null}
{"step": 1280, "epoch": 0.5213318399348336, "loss": 1.621, "learning_rate": 9.795500664802385e-05, "train_runtime": null}
{"step": 1290, "epoch": 0.5254047449343244, "loss": 1.659, "learning_rate": 9.663605632920518e-05, "train_runtime": null}
{"step": 1300, "epoch": 0.5294776499338153, "loss": 1.7666, "learning_rate": 9.53176916404576e-05, "train_runtime": null}
{"step": 1310, "epoch": 0.5335505549333062, "loss": 1.6026, "learning_rate": 9.400014209632763e-05, "train_runtime": null}
{"step": 1320, "epoch": 0.537623459932797, "loss": 1.7769, "learning_rate": 9.268363706945312e-05, "train_runtime": null}
{"step": 1330, "epoch": 0.5416963649322879, "loss": 1.5157, "learning_rate": 9.136840575063147e-05, "train_runtime": null}
{"step": 1340, "epoch": 0.5457692699317789, "loss": 1.8756, "learning_rate": 9.005467710891987e-05, "train_runtime": null}
{"step": 1350, "epoch": 0.5498421749312697, "loss": 1.5708, "learning_rate": 8.874267985177394e-05, "train_runtime": null}
{"step": 1360, "epoch": 0.5539150799307606, "loss": 1.6876, "learning_rate": 8.743264238523199e-05, "train_runtime": null}
{"step": 1370, "epoch": 0.5579879849302515, "loss": 1.6694, "learning_rate": 8.612479277415174e-05, "train_runtime": null}
{"step": 1380, "epoch": 0.5620608899297423, "loss": 1.5838, "learning_rate": 8.481935870250637e-05, "train_runtime": null}
{"step": 1390, "epoch": 0.5661337949292333, "loss": 1.6321, "learning_rate": 8.351656743374709e-05, "train_runtime": null}
{"step": 1400, "epoch": 0.5702066999287242, "loss": 1.5016, "learning_rate": 8.22166457712386e-05, "train_runtime": null}
{"step": 1410, "epoch": 0.5742796049282151, "loss": 1.5412, "learning_rate": 8.091982001877493e-05, "train_runtime": null}
{"step": 1420, "epoch": 0.5783525099277059, "loss": 1.7629, "learning_rate": 7.962631594118208e-05, "train_runtime": null}
{"step": 1430, "epoch": 0.5824254149271968, "loss": 1.6342, "learning_rate": 7.833635872501462e-05, "train_runtime": null}
{"step": 1440, "epoch": 0.5864983199266877, "loss": 1.5803, "learning_rate": 7.705017293935281e-05, "train_runtime": null}
{"step": 1450, "epoch": 0.5905712249261786, "loss": 1.8459, "learning_rate": 7.576798249670725e-05, "train_runtime": null}
{"step": 1460, "epoch": 0.5946441299256695, "loss": 1.5263, "learning_rate": 7.449001061403809e-05, "train_runtime": null}
{"step": 1470, "epoch": 0.5987170349251604, "loss": 1.5965, "learning_rate": 7.321647977389479e-05, "train_runtime": null}
{"step": 1480, "epoch": 0.6027899399246512, "loss": 1.5667, "learning_rate": 7.194761168568445e-05, "train_runtime": null}
{"step": 1490, "epoch": 0.6068628449241421, "loss": 1.4813, "learning_rate": 7.068362724707392e-05, "train_runtime": null}
{"step": 1500, "epoch": 0.610935749923633, "loss": 1.6786, "learning_rate": 6.942474650553408e-05, "train_runtime": null}
{"step": 1510, "epoch": 0.615008654923124, "loss": 1.6323, "learning_rate": 6.817118862003132e-05, "train_runtime": null}
{"step": 1520, "epoch": 0.6190815599226148, "loss": 1.8166, "learning_rate": 6.692317182287432e-05, "train_runtime": null}
{"step": 1530, "epoch": 0.6231544649221057, "loss": 1.6117, "learning_rate": 6.568091338172195e-05, "train_runtime": null}
{"step": 1540, "epoch": 0.6272273699215966, "loss": 1.6222, "learning_rate": 6.444462956175876e-05, "train_runtime": null}
{"step": 1550, "epoch": 0.6313002749210874, "loss": 1.6048, "learning_rate": 6.321453558804571e-05, "train_runtime": null}
{"step": 1560, "epoch": 0.6353731799205784, "loss": 1.7073, "learning_rate": 6.199084560805121e-05, "train_runtime": null}
{"step": 1570, "epoch": 0.6394460849200693, "loss": 1.8152, "learning_rate": 6.077377265437043e-05, "train_runtime": null}
{"step": 1580, "epoch": 0.6435189899195601, "loss": 1.7108, "learning_rate": 5.956352860763809e-05, "train_runtime": null}
{"step": 1590, "epoch": 0.647591894919051, "loss": 1.4793, "learning_rate": 5.83603241596423e-05, "train_runtime": null}
{"step": 1600, "epoch": 0.6516647999185419, "loss": 1.752, "learning_rate": 5.716436877664517e-05, "train_runtime": null}
{"step": 1610, "epoch": 0.6557377049180327, "loss": 1.7172, "learning_rate": 5.5975870662916484e-05, "train_runtime": null}
{"step": 1620, "epoch": 0.6598106099175237, "loss": 1.5377, "learning_rate": 5.4795036724487735e-05, "train_runtime": null}
{"step": 1630, "epoch": 0.6638835149170146, "loss": 1.4547, "learning_rate": 5.362207253313136e-05, "train_runtime": null}
{"step": 1640, "epoch": 0.6679564199165055, "loss": 1.6086, "learning_rate": 5.245718229057326e-05, "train_runtime": null}
{"step": 1650, "epoch": 0.6720293249159963, "loss": 1.6018, "learning_rate": 5.1300568792942535e-05, "train_runtime": null}
{"step": 1660, "epoch": 0.6761022299154872, "loss": 1.7574, "learning_rate": 5.015243339546731e-05, "train_runtime": null}
{"step": 1670, "epoch": 0.6801751349149782, "loss": 1.7425, "learning_rate": 4.90129759774202e-05, "train_runtime": null}
{"step": 1680, "epoch": 0.684248039914469, "loss": 1.6121, "learning_rate": 4.7882394907321674e-05, "train_runtime": null}
{"step": 1690, "epoch": 0.6883209449139599, "loss": 1.6416, "learning_rate": 4.676088700840575e-05, "train_runtime": null}
{"step": 1700, "epoch": 0.6923938499134508, "loss": 1.6675, "learning_rate": 4.564864752435509e-05, "train_runtime": null}
{"step": 1710, "epoch": 0.6964667549129416, "loss": 1.6641, "learning_rate": 4.454587008531097e-05, "train_runtime": null}
{"step": 1720, "epoch": 0.7005396599124325, "loss": 1.6978, "learning_rate": 4.345274667416399e-05, "train_runtime": null}
{"step": 1730, "epoch": 0.7046125649119235, "loss": 1.681, "learning_rate": 4.2369467593131926e-05, "train_runtime": null}
{"step": 1740, "epoch": 0.7086854699114143, "loss": 1.5405, "learning_rate": 4.129622143062985e-05, "train_runtime": null}
{"step": 1750, "epoch": 0.7127583749109052, "loss": 1.7217, "learning_rate": 4.02331950284387e-05, "train_runtime": null}
{"step": 1760, "epoch": 0.7168312799103961, "loss": 1.5948, "learning_rate": 3.918057344917795e-05, "train_runtime": null}
{"step": 1770, "epoch": 0.720904184909887, "loss": 1.6678, "learning_rate": 3.813853994408793e-05, "train_runtime": null}
{"step": 1780, "epoch": 0.7249770899093778, "loss": 1.682, "learning_rate": 3.7107275921127704e-05, "train_runtime": null}
{"step": 1790, "epoch": 0.7290499949088688, "loss": 1.7135, "learning_rate": 3.60869609133936e-05, "train_runtime": null}
{"step": 1800, "epoch": 0.7331228999083597, "loss": 1.4999, "learning_rate": 3.507777254786425e-05, "train_runtime": null}
{"step": 1810, "epoch": 0.7371958049078505, "loss": 1.6202, "learning_rate": 3.407988651447738e-05, "train_runtime": null}
{"step": 1820, "epoch": 0.7412687099073414, "loss": 1.5502, "learning_rate": 3.3093476535544074e-05, "train_runtime": null}
{"step": 1830, "epoch": 0.7453416149068323, "loss": 1.6333, "learning_rate": 3.211871433550513e-05, "train_runtime": null}
{"step": 1840, "epoch": 0.7494145199063232, "loss": 1.4907, "learning_rate": 3.1155769611035825e-05, "train_runtime": null}
{"step": 1850, "epoch": 0.7534874249058141, "loss": 1.8018, "learning_rate": 3.0204810001503124e-05, "train_runtime": null}
{"step": 1860, "epoch": 0.757560329905305, "loss": 1.7305, "learning_rate": 2.9266001059781258e-05, "train_runtime": null}
{"step": 1870, "epoch": 0.7616332349047958, "loss": 1.6642, "learning_rate": 2.83395062234308e-05, "train_runtime": null}
{"step": 1880, "epoch": 0.7657061399042867, "loss": 1.8349, "learning_rate": 2.742548678624548e-05, "train_runtime": null}
{"step": 1890, "epoch": 0.7697790449037776, "loss": 1.7883, "learning_rate": 2.6524101870172846e-05, "train_runtime": null}
{"step": 1900, "epoch": 0.7738519499032686, "loss": 1.6654, "learning_rate": 2.5635508397612262e-05, "train_runtime": null}
{"step": 1910, "epoch": 0.7779248549027594, "loss": 1.6478, "learning_rate": 2.4759861064096603e-05, "train_runtime": null}
{"step": 1920, "epoch": 0.7819977599022503, "loss": 1.5355, "learning_rate": 2.3897312311360955e-05, "train_runtime": null}
{"step": 1930, "epoch": 0.7860706649017412, "loss": 1.5565, "learning_rate": 2.3048012300804222e-05, "train_runtime": null}
{"step": 1940, "epoch": 0.790143569901232, "loss": 1.636, "learning_rate": 2.221210888734736e-05, "train_runtime": null}
{"step": 1950, "epoch": 0.7942164749007229, "loss": 1.7844, "learning_rate": 2.13897475936933e-05, "train_runtime": null}
{"step": 1960, "epoch": 0.7982893799002139, "loss": 1.5874, "learning_rate": 2.0581071584992818e-05, "train_runtime": null}
{"step": 1970, "epoch": 0.8023622848997047, "loss": 1.6279, "learning_rate": 1.9786221643920844e-05, "train_runtime": null}
{"step": 1980, "epoch": 0.8064351898991956, "loss": 1.6269, "learning_rate": 1.9005336146167686e-05, "train_runtime": null}
{"step": 1990, "epoch": 0.8105080948986865, "loss": 1.5308, "learning_rate": 1.8238551036349028e-05, "train_runtime": null}
{"step": 2000, "epoch": 0.8145809998981773, "loss": 1.5568, "learning_rate": 1.7485999804339348e-05, "train_runtime": null}
{"step": 2010, "epoch": 0.8186539048976683, "loss": 1.6787, "learning_rate": 1.6747813462032615e-05, "train_runtime": null}
{"step": 2020, "epoch": 0.8227268098971592, "loss": 1.6416, "learning_rate": 1.6024120520534326e-05, "train_runtime": null}
{"step": 2030, "epoch": 0.82679971489665, "loss": 1.69, "learning_rate": 1.5315046967789082e-05, "train_runtime": null}
{"step": 2040, "epoch": 0.8308726198961409, "loss": 1.6092, "learning_rate": 1.4620716246647203e-05, "train_runtime": null}
{"step": 2050, "epoch": 0.8349455248956318, "loss": 1.6848, "learning_rate": 1.394124923337462e-05, "train_runtime": null}
{"step": 2060, "epoch": 0.8390184298951227, "loss": 1.5843, "learning_rate": 1.3276764216609294e-05, "train_runtime": null}
{"step": 2070, "epoch": 0.8430913348946136, "loss": 1.5443, "learning_rate": 1.2627376876768593e-05, "train_runtime": null}
{"step": 2080, "epoch": 0.8471642398941045, "loss": 1.6073, "learning_rate": 1.1993200265910131e-05, "train_runtime": null}
{"step": 2090, "epoch": 0.8512371448935954, "loss": 1.8038, "learning_rate": 1.1374344788050829e-05, "train_runtime": null}
{"step": 2100, "epoch": 0.8553100498930862, "loss": 1.5022, "learning_rate": 1.0770918179946388e-05, "train_runtime": null}
{"step": 2110, "epoch": 0.8593829548925771, "loss": 1.7432, "learning_rate": 1.0183025492335408e-05, "train_runtime": null}
{"step": 2120, "epoch": 0.863455859892068, "loss": 1.6706, "learning_rate": 9.610769071651193e-06, "train_runtime": null}
{"step": 2130, "epoch": 0.867528764891559, "loss": 1.5901, "learning_rate": 9.05424854220408e-06, "train_runtime": null}
{"step": 2140, "epoch": 0.8716016698910498, "loss": 1.6414, "learning_rate": 8.513560788837916e-06, "train_runtime": null}
{"step": 2150, "epoch": 0.8756745748905407, "loss": 1.6038, "learning_rate": 7.988799940063297e-06, "train_runtime": null}
{"step": 2160, "epoch": 0.8797474798900315, "loss": 1.6661, "learning_rate": 7.480057351670688e-06, "train_runtime": null}
{"step": 2170, "epoch": 0.8838203848895224, "loss": 1.6066, "learning_rate": 6.987421590826282e-06, "train_runtime": null}
{"step": 2180, "epoch": 0.8878932898890134, "loss": 1.6816, "learning_rate": 6.510978420653335e-06, "train_runtime": null}
{"step": 2190, "epoch": 0.8919661948885043, "loss": 1.7702, "learning_rate": 6.050810785301597e-06, "train_runtime": null}
{"step": 2200, "epoch": 0.8960390998879951, "loss": 1.5417, "learning_rate": 5.606998795507578e-06, "train_runtime": null}
{"step": 2210, "epoch": 0.900112004887486, "loss": 1.5119, "learning_rate": 5.1796197146479985e-06, "train_runtime": null}
{"step": 2220, "epoch": 0.9041849098869769, "loss": 1.5287, "learning_rate": 4.768747945288987e-06, "train_runtime": null}
{"step": 2230, "epoch": 0.9082578148864677, "loss": 1.5842, "learning_rate": 4.37445501623337e-06, "train_runtime": null}
{"step": 2240, "epoch": 0.9123307198859587, "loss": 1.5514, "learning_rate": 3.996809570068127e-06, "train_runtime": null}
{"step": 2250, "epoch": 0.9164036248854496, "loss": 1.5493, "learning_rate": 3.635877351214445e-06, "train_runtime": null}
{"step": 2260, "epoch": 0.9204765298849404, "loss": 1.5494, "learning_rate": 3.291721194482189e-06, "train_runtime": null}
{"step": 2270, "epoch": 0.9245494348844313, "loss": 1.6294, "learning_rate": 2.9644010141310017e-06, "train_runtime": null}
{"step": 2280, "epoch": 0.9286223398839222, "loss": 1.7332, "learning_rate": 2.65397379343979e-06, "train_runtime": null}
{"step": 2290, "epoch": 0.932695244883413, "loss": 1.6691, "learning_rate": 2.3604935747865377e-06, "train_runtime": null}
{"step": 2300, "epoch": 0.936768149882904, "loss": 1.5358, "learning_rate": 2.0840114502400086e-06, "train_runtime": null}
{"step": 2310, "epoch": 0.9408410548823949, "loss": 1.6318, "learning_rate": 1.8245755526650753e-06, "train_runtime": null}
{"step": 2320, "epoch": 0.9449139598818858, "loss": 1.5595, "learning_rate": 1.5822310473433411e-06, "train_runtime": null}
{"step": 2330, "epoch": 0.9489868648813766, "loss": 1.7522, "learning_rate": 1.357020124110231e-06, "train_runtime": null}
{"step": 2340, "epoch": 0.9530597698808675, "loss": 1.5307, "learning_rate": 1.1489819900101784e-06, "train_runtime": null}
{"step": 2350, "epoch": 0.9571326748803585, "loss": 1.5633, "learning_rate": 9.581528624710734e-07, "train_runtime": null}
{"step": 2360, "epoch": 0.9612055798798493, "loss": 1.6927, "learning_rate": 7.845659629990842e-07, "train_runtime": null}
{"step": 2370, "epoch": 0.9652784848793402, "loss": 1.815, "learning_rate": 6.282515113952281e-07, "train_runtime": null}
{"step": 2380, "epoch": 0.9693513898788311, "loss": 1.5694, "learning_rate": 4.892367204943016e-07, "train_runtime": null}
{"step": 2390, "epoch": 0.9734242948783219, "loss": 1.6555, "learning_rate": 3.6754579142741495e-07, "train_runtime": null}
{"step": 2400, "epoch": 0.9774971998778128, "loss": 1.7315, "learning_rate": 2.6319990940885107e-07, "train_runtime": null}
{"step": 2410, "epoch": 0.9815701048773038, "loss": 1.5847, "learning_rate": 1.762172400478601e-07, "train_runtime": null}
{"step": 2420, "epoch": 0.9856430098767947, "loss": 1.54, "learning_rate": 1.0661292618624474e-07, "train_runtime": null}
{"step": 2430, "epoch": 0.9897159148762855, "loss": 1.4109, "learning_rate": 5.439908526212456e-08, "train_runtime": null}
{"step": 2440, "epoch": 0.9937888198757764, "loss": 1.5128, "learning_rate": 1.9584807200423438e-08, "train_runtime": null}
{"step": 2450, "epoch": 0.9978617248752673, "loss": 1.6287, "learning_rate": 2.176152830357658e-09, "train_runtime": null}
{"step": 2455, "epoch": 0.9998981773750127, "loss": null, "learning_rate": null, "train_runtime": 9748.6189}
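Each record above is one JSON object per logging step (step, epoch, loss, learning_rate, and a final summary record carrying only train_runtime). As a minimal sketch for inspecting the run, assuming the records are saved one per line in a JSONL file (the filename trainer_log.jsonl is hypothetical, not part of the log), the following Python snippet parses the log and plots the loss and learning-rate curves:

import json
import matplotlib.pyplot as plt

# Hypothetical path: assumes the records above are stored one JSON object per line.
LOG_PATH = "trainer_log.jsonl"

steps, losses, lrs = [], [], []
with open(LOG_PATH) as f:
    for line in f:
        line = line.strip()
        if not line:
            continue
        rec = json.loads(line)
        # Skip the final summary record, which reports only train_runtime.
        if rec.get("loss") is None:
            continue
        steps.append(rec["step"])
        losses.append(rec["loss"])
        lrs.append(rec["learning_rate"])

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()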