{
"best_metric": 0.7206348776817322,
"best_model_checkpoint": "checkpoints/instrucode/with_input/decomp_plan/llama-2-13b/checkpoint-4200",
"epoch": 2.7009646302250805,
"eval_steps": 200,
"global_step": 4200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 2.9999999999999997e-05,
"loss": 1.4995,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 5.9999999999999995e-05,
"loss": 1.4447,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 8.999999999999999e-05,
"loss": 1.2253,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 0.00011999999999999999,
"loss": 0.9599,
"step": 40
},
{
"epoch": 0.03,
"learning_rate": 0.00015,
"loss": 0.8987,
"step": 50
},
{
"epoch": 0.04,
"learning_rate": 0.00017999999999999998,
"loss": 0.8692,
"step": 60
},
{
"epoch": 0.05,
"learning_rate": 0.00020999999999999998,
"loss": 0.8347,
"step": 70
},
{
"epoch": 0.05,
"learning_rate": 0.00023999999999999998,
"loss": 0.8459,
"step": 80
},
{
"epoch": 0.06,
"learning_rate": 0.00027,
"loss": 0.835,
"step": 90
},
{
"epoch": 0.06,
"learning_rate": 0.0003,
"loss": 0.8249,
"step": 100
},
{
"epoch": 0.07,
"learning_rate": 0.00029960912052117263,
"loss": 0.8187,
"step": 110
},
{
"epoch": 0.08,
"learning_rate": 0.0002992182410423453,
"loss": 0.823,
"step": 120
},
{
"epoch": 0.08,
"learning_rate": 0.0002988273615635179,
"loss": 0.8365,
"step": 130
},
{
"epoch": 0.09,
"learning_rate": 0.00029843648208469054,
"loss": 0.8123,
"step": 140
},
{
"epoch": 0.1,
"learning_rate": 0.00029804560260586314,
"loss": 0.8179,
"step": 150
},
{
"epoch": 0.1,
"learning_rate": 0.0002976547231270358,
"loss": 0.8055,
"step": 160
},
{
"epoch": 0.11,
"learning_rate": 0.00029726384364820845,
"loss": 0.8009,
"step": 170
},
{
"epoch": 0.12,
"learning_rate": 0.00029687296416938105,
"loss": 0.8018,
"step": 180
},
{
"epoch": 0.12,
"learning_rate": 0.0002964820846905537,
"loss": 0.7961,
"step": 190
},
{
"epoch": 0.13,
"learning_rate": 0.00029609120521172636,
"loss": 0.8072,
"step": 200
},
{
"epoch": 0.13,
"eval_loss": 0.793488621711731,
"eval_runtime": 303.5091,
"eval_samples_per_second": 6.59,
"eval_steps_per_second": 0.824,
"step": 200
},
{
"epoch": 0.14,
"learning_rate": 0.000295700325732899,
"loss": 0.8007,
"step": 210
},
{
"epoch": 0.14,
"learning_rate": 0.0002953094462540716,
"loss": 0.8051,
"step": 220
},
{
"epoch": 0.15,
"learning_rate": 0.0002949185667752443,
"loss": 0.7764,
"step": 230
},
{
"epoch": 0.15,
"learning_rate": 0.00029452768729641693,
"loss": 0.8131,
"step": 240
},
{
"epoch": 0.16,
"learning_rate": 0.00029413680781758953,
"loss": 0.7833,
"step": 250
},
{
"epoch": 0.17,
"learning_rate": 0.0002937459283387622,
"loss": 0.779,
"step": 260
},
{
"epoch": 0.17,
"learning_rate": 0.00029335504885993484,
"loss": 0.7958,
"step": 270
},
{
"epoch": 0.18,
"learning_rate": 0.00029296416938110744,
"loss": 0.7782,
"step": 280
},
{
"epoch": 0.19,
"learning_rate": 0.0002925732899022801,
"loss": 0.7835,
"step": 290
},
{
"epoch": 0.19,
"learning_rate": 0.00029218241042345275,
"loss": 0.7805,
"step": 300
},
{
"epoch": 0.2,
"learning_rate": 0.0002917915309446254,
"loss": 0.7819,
"step": 310
},
{
"epoch": 0.21,
"learning_rate": 0.000291400651465798,
"loss": 0.7799,
"step": 320
},
{
"epoch": 0.21,
"learning_rate": 0.00029100977198697066,
"loss": 0.7681,
"step": 330
},
{
"epoch": 0.22,
"learning_rate": 0.00029061889250814327,
"loss": 0.8061,
"step": 340
},
{
"epoch": 0.23,
"learning_rate": 0.0002902280130293159,
"loss": 0.7578,
"step": 350
},
{
"epoch": 0.23,
"learning_rate": 0.0002898371335504886,
"loss": 0.7675,
"step": 360
},
{
"epoch": 0.24,
"learning_rate": 0.00028944625407166123,
"loss": 0.7766,
"step": 370
},
{
"epoch": 0.24,
"learning_rate": 0.0002890553745928339,
"loss": 0.7976,
"step": 380
},
{
"epoch": 0.25,
"learning_rate": 0.0002886644951140065,
"loss": 0.783,
"step": 390
},
{
"epoch": 0.26,
"learning_rate": 0.00028827361563517914,
"loss": 0.7746,
"step": 400
},
{
"epoch": 0.26,
"eval_loss": 0.7713018655776978,
"eval_runtime": 306.4904,
"eval_samples_per_second": 6.525,
"eval_steps_per_second": 0.816,
"step": 400
},
{
"epoch": 0.26,
"learning_rate": 0.00028788273615635174,
"loss": 0.7778,
"step": 410
},
{
"epoch": 0.27,
"learning_rate": 0.0002874918566775244,
"loss": 0.7719,
"step": 420
},
{
"epoch": 0.28,
"learning_rate": 0.00028710097719869705,
"loss": 0.7835,
"step": 430
},
{
"epoch": 0.28,
"learning_rate": 0.00028671009771986966,
"loss": 0.749,
"step": 440
},
{
"epoch": 0.29,
"learning_rate": 0.0002863192182410423,
"loss": 0.7693,
"step": 450
},
{
"epoch": 0.3,
"learning_rate": 0.00028592833876221497,
"loss": 0.7759,
"step": 460
},
{
"epoch": 0.3,
"learning_rate": 0.0002855374592833876,
"loss": 0.7614,
"step": 470
},
{
"epoch": 0.31,
"learning_rate": 0.0002851465798045602,
"loss": 0.763,
"step": 480
},
{
"epoch": 0.32,
"learning_rate": 0.0002847557003257329,
"loss": 0.7713,
"step": 490
},
{
"epoch": 0.32,
"learning_rate": 0.0002843648208469055,
"loss": 0.774,
"step": 500
},
{
"epoch": 0.33,
"learning_rate": 0.00028397394136807813,
"loss": 0.7697,
"step": 510
},
{
"epoch": 0.33,
"learning_rate": 0.0002835830618892508,
"loss": 0.7683,
"step": 520
},
{
"epoch": 0.34,
"learning_rate": 0.00028319218241042344,
"loss": 0.7788,
"step": 530
},
{
"epoch": 0.35,
"learning_rate": 0.00028280130293159605,
"loss": 0.7551,
"step": 540
},
{
"epoch": 0.35,
"learning_rate": 0.0002824104234527687,
"loss": 0.7747,
"step": 550
},
{
"epoch": 0.36,
"learning_rate": 0.00028201954397394136,
"loss": 0.7494,
"step": 560
},
{
"epoch": 0.37,
"learning_rate": 0.000281628664495114,
"loss": 0.7749,
"step": 570
},
{
"epoch": 0.37,
"learning_rate": 0.0002812377850162866,
"loss": 0.7375,
"step": 580
},
{
"epoch": 0.38,
"learning_rate": 0.00028084690553745927,
"loss": 0.7558,
"step": 590
},
{
"epoch": 0.39,
"learning_rate": 0.00028045602605863187,
"loss": 0.7596,
"step": 600
},
{
"epoch": 0.39,
"eval_loss": 0.7580611109733582,
"eval_runtime": 303.7189,
"eval_samples_per_second": 6.585,
"eval_steps_per_second": 0.823,
"step": 600
},
{
"epoch": 0.39,
"learning_rate": 0.0002800651465798045,
"loss": 0.774,
"step": 610
},
{
"epoch": 0.4,
"learning_rate": 0.0002796742671009772,
"loss": 0.7443,
"step": 620
},
{
"epoch": 0.41,
"learning_rate": 0.00027928338762214983,
"loss": 0.7566,
"step": 630
},
{
"epoch": 0.41,
"learning_rate": 0.00027889250814332244,
"loss": 0.758,
"step": 640
},
{
"epoch": 0.42,
"learning_rate": 0.0002785016286644951,
"loss": 0.7766,
"step": 650
},
{
"epoch": 0.42,
"learning_rate": 0.00027811074918566775,
"loss": 0.7652,
"step": 660
},
{
"epoch": 0.43,
"learning_rate": 0.00027771986970684035,
"loss": 0.7578,
"step": 670
},
{
"epoch": 0.44,
"learning_rate": 0.000277328990228013,
"loss": 0.7619,
"step": 680
},
{
"epoch": 0.44,
"learning_rate": 0.00027693811074918566,
"loss": 0.7523,
"step": 690
},
{
"epoch": 0.45,
"learning_rate": 0.00027654723127035826,
"loss": 0.7486,
"step": 700
},
{
"epoch": 0.46,
"learning_rate": 0.0002761563517915309,
"loss": 0.7454,
"step": 710
},
{
"epoch": 0.46,
"learning_rate": 0.00027576547231270357,
"loss": 0.7639,
"step": 720
},
{
"epoch": 0.47,
"learning_rate": 0.0002753745928338762,
"loss": 0.7692,
"step": 730
},
{
"epoch": 0.48,
"learning_rate": 0.0002749837133550488,
"loss": 0.7609,
"step": 740
},
{
"epoch": 0.48,
"learning_rate": 0.0002745928338762215,
"loss": 0.7729,
"step": 750
},
{
"epoch": 0.49,
"learning_rate": 0.0002742019543973941,
"loss": 0.7515,
"step": 760
},
{
"epoch": 0.5,
"learning_rate": 0.00027381107491856674,
"loss": 0.746,
"step": 770
},
{
"epoch": 0.5,
"learning_rate": 0.0002734201954397394,
"loss": 0.7406,
"step": 780
},
{
"epoch": 0.51,
"learning_rate": 0.00027302931596091205,
"loss": 0.7529,
"step": 790
},
{
"epoch": 0.51,
"learning_rate": 0.00027263843648208465,
"loss": 0.7213,
"step": 800
},
{
"epoch": 0.51,
"eval_loss": 0.7531036138534546,
"eval_runtime": 304.2051,
"eval_samples_per_second": 6.575,
"eval_steps_per_second": 0.822,
"step": 800
},
{
"epoch": 0.52,
"learning_rate": 0.0002722475570032573,
"loss": 0.7395,
"step": 810
},
{
"epoch": 0.53,
"learning_rate": 0.00027185667752442996,
"loss": 0.7465,
"step": 820
},
{
"epoch": 0.53,
"learning_rate": 0.0002714657980456026,
"loss": 0.7671,
"step": 830
},
{
"epoch": 0.54,
"learning_rate": 0.0002710749185667752,
"loss": 0.7289,
"step": 840
},
{
"epoch": 0.55,
"learning_rate": 0.00027068403908794787,
"loss": 0.7453,
"step": 850
},
{
"epoch": 0.55,
"learning_rate": 0.00027029315960912047,
"loss": 0.7409,
"step": 860
},
{
"epoch": 0.56,
"learning_rate": 0.0002699022801302931,
"loss": 0.7635,
"step": 870
},
{
"epoch": 0.57,
"learning_rate": 0.0002695114006514658,
"loss": 0.7356,
"step": 880
},
{
"epoch": 0.57,
"learning_rate": 0.00026912052117263844,
"loss": 0.7526,
"step": 890
},
{
"epoch": 0.58,
"learning_rate": 0.00026872964169381104,
"loss": 0.7664,
"step": 900
},
{
"epoch": 0.59,
"learning_rate": 0.0002683387622149837,
"loss": 0.7509,
"step": 910
},
{
"epoch": 0.59,
"learning_rate": 0.00026794788273615635,
"loss": 0.7422,
"step": 920
},
{
"epoch": 0.6,
"learning_rate": 0.00026755700325732895,
"loss": 0.7612,
"step": 930
},
{
"epoch": 0.6,
"learning_rate": 0.0002671661237785016,
"loss": 0.7545,
"step": 940
},
{
"epoch": 0.61,
"learning_rate": 0.0002667752442996742,
"loss": 0.7318,
"step": 950
},
{
"epoch": 0.62,
"learning_rate": 0.00026638436482084686,
"loss": 0.7393,
"step": 960
},
{
"epoch": 0.62,
"learning_rate": 0.0002659934853420195,
"loss": 0.7543,
"step": 970
},
{
"epoch": 0.63,
"learning_rate": 0.00026560260586319217,
"loss": 0.7329,
"step": 980
},
{
"epoch": 0.64,
"learning_rate": 0.0002652117263843648,
"loss": 0.747,
"step": 990
},
{
"epoch": 0.64,
"learning_rate": 0.00026482084690553743,
"loss": 0.7519,
"step": 1000
},
{
"epoch": 0.64,
"eval_loss": 0.7442443370819092,
"eval_runtime": 308.0159,
"eval_samples_per_second": 6.493,
"eval_steps_per_second": 0.812,
"step": 1000
},
{
"epoch": 0.65,
"learning_rate": 0.0002644299674267101,
"loss": 0.738,
"step": 1010
},
{
"epoch": 0.66,
"learning_rate": 0.0002640390879478827,
"loss": 0.7544,
"step": 1020
},
{
"epoch": 0.66,
"learning_rate": 0.00026364820846905534,
"loss": 0.7336,
"step": 1030
},
{
"epoch": 0.67,
"learning_rate": 0.000263257328990228,
"loss": 0.7345,
"step": 1040
},
{
"epoch": 0.68,
"learning_rate": 0.0002628664495114006,
"loss": 0.7431,
"step": 1050
},
{
"epoch": 0.68,
"learning_rate": 0.00026247557003257325,
"loss": 0.7625,
"step": 1060
},
{
"epoch": 0.69,
"learning_rate": 0.0002620846905537459,
"loss": 0.7267,
"step": 1070
},
{
"epoch": 0.69,
"learning_rate": 0.00026169381107491856,
"loss": 0.7438,
"step": 1080
},
{
"epoch": 0.7,
"learning_rate": 0.0002613029315960912,
"loss": 0.7245,
"step": 1090
},
{
"epoch": 0.71,
"learning_rate": 0.0002609120521172638,
"loss": 0.7566,
"step": 1100
},
{
"epoch": 0.71,
"learning_rate": 0.0002605211726384365,
"loss": 0.7371,
"step": 1110
},
{
"epoch": 0.72,
"learning_rate": 0.0002601302931596091,
"loss": 0.7389,
"step": 1120
},
{
"epoch": 0.73,
"learning_rate": 0.00025973941368078173,
"loss": 0.7326,
"step": 1130
},
{
"epoch": 0.73,
"learning_rate": 0.0002593485342019544,
"loss": 0.7092,
"step": 1140
},
{
"epoch": 0.74,
"learning_rate": 0.00025895765472312704,
"loss": 0.7479,
"step": 1150
},
{
"epoch": 0.75,
"learning_rate": 0.00025856677524429964,
"loss": 0.7289,
"step": 1160
},
{
"epoch": 0.75,
"learning_rate": 0.0002581758957654723,
"loss": 0.7506,
"step": 1170
},
{
"epoch": 0.76,
"learning_rate": 0.00025778501628664495,
"loss": 0.7388,
"step": 1180
},
{
"epoch": 0.77,
"learning_rate": 0.00025739413680781755,
"loss": 0.756,
"step": 1190
},
{
"epoch": 0.77,
"learning_rate": 0.0002570032573289902,
"loss": 0.7269,
"step": 1200
},
{
"epoch": 0.77,
"eval_loss": 0.7402953505516052,
"eval_runtime": 304.0596,
"eval_samples_per_second": 6.578,
"eval_steps_per_second": 0.822,
"step": 1200
},
{
"epoch": 0.78,
"learning_rate": 0.0002566123778501628,
"loss": 0.729,
"step": 1210
},
{
"epoch": 0.78,
"learning_rate": 0.00025622149837133546,
"loss": 0.7473,
"step": 1220
},
{
"epoch": 0.79,
"learning_rate": 0.0002558306188925081,
"loss": 0.7174,
"step": 1230
},
{
"epoch": 0.8,
"learning_rate": 0.0002554397394136808,
"loss": 0.7526,
"step": 1240
},
{
"epoch": 0.8,
"learning_rate": 0.00025504885993485343,
"loss": 0.7502,
"step": 1250
},
{
"epoch": 0.81,
"learning_rate": 0.00025465798045602603,
"loss": 0.7335,
"step": 1260
},
{
"epoch": 0.82,
"learning_rate": 0.0002542671009771987,
"loss": 0.7452,
"step": 1270
},
{
"epoch": 0.82,
"learning_rate": 0.0002538762214983713,
"loss": 0.7534,
"step": 1280
},
{
"epoch": 0.83,
"learning_rate": 0.00025348534201954394,
"loss": 0.7408,
"step": 1290
},
{
"epoch": 0.84,
"learning_rate": 0.0002530944625407166,
"loss": 0.7582,
"step": 1300
},
{
"epoch": 0.84,
"learning_rate": 0.0002527035830618892,
"loss": 0.7575,
"step": 1310
},
{
"epoch": 0.85,
"learning_rate": 0.00025231270358306185,
"loss": 0.7391,
"step": 1320
},
{
"epoch": 0.86,
"learning_rate": 0.0002519218241042345,
"loss": 0.7486,
"step": 1330
},
{
"epoch": 0.86,
"learning_rate": 0.00025153094462540716,
"loss": 0.7227,
"step": 1340
},
{
"epoch": 0.87,
"learning_rate": 0.0002511400651465798,
"loss": 0.7511,
"step": 1350
},
{
"epoch": 0.87,
"learning_rate": 0.0002507491856677524,
"loss": 0.7442,
"step": 1360
},
{
"epoch": 0.88,
"learning_rate": 0.0002503583061889251,
"loss": 0.7544,
"step": 1370
},
{
"epoch": 0.89,
"learning_rate": 0.0002499674267100977,
"loss": 0.7456,
"step": 1380
},
{
"epoch": 0.89,
"learning_rate": 0.00024957654723127033,
"loss": 0.7363,
"step": 1390
},
{
"epoch": 0.9,
"learning_rate": 0.000249185667752443,
"loss": 0.7289,
"step": 1400
},
{
"epoch": 0.9,
"eval_loss": 0.7365428805351257,
"eval_runtime": 304.0198,
"eval_samples_per_second": 6.579,
"eval_steps_per_second": 0.822,
"step": 1400
},
{
"epoch": 0.91,
"learning_rate": 0.0002487947882736156,
"loss": 0.7376,
"step": 1410
},
{
"epoch": 0.91,
"learning_rate": 0.00024840390879478824,
"loss": 0.7535,
"step": 1420
},
{
"epoch": 0.92,
"learning_rate": 0.0002480130293159609,
"loss": 0.7152,
"step": 1430
},
{
"epoch": 0.93,
"learning_rate": 0.00024762214983713355,
"loss": 0.7264,
"step": 1440
},
{
"epoch": 0.93,
"learning_rate": 0.00024723127035830616,
"loss": 0.7113,
"step": 1450
},
{
"epoch": 0.94,
"learning_rate": 0.0002468403908794788,
"loss": 0.7281,
"step": 1460
},
{
"epoch": 0.95,
"learning_rate": 0.0002464495114006514,
"loss": 0.7356,
"step": 1470
},
{
"epoch": 0.95,
"learning_rate": 0.00024605863192182407,
"loss": 0.7212,
"step": 1480
},
{
"epoch": 0.96,
"learning_rate": 0.0002456677524429967,
"loss": 0.7294,
"step": 1490
},
{
"epoch": 0.96,
"learning_rate": 0.0002452768729641694,
"loss": 0.7432,
"step": 1500
},
{
"epoch": 0.97,
"learning_rate": 0.00024488599348534203,
"loss": 0.7416,
"step": 1510
},
{
"epoch": 0.98,
"learning_rate": 0.00024449511400651463,
"loss": 0.7371,
"step": 1520
},
{
"epoch": 0.98,
"learning_rate": 0.00024410423452768726,
"loss": 0.76,
"step": 1530
},
{
"epoch": 0.99,
"learning_rate": 0.00024371335504885992,
"loss": 0.7283,
"step": 1540
},
{
"epoch": 1.0,
"learning_rate": 0.00024332247557003257,
"loss": 0.7315,
"step": 1550
},
{
"epoch": 1.0,
"learning_rate": 0.0002429315960912052,
"loss": 0.7212,
"step": 1560
},
{
"epoch": 1.01,
"learning_rate": 0.00024254071661237783,
"loss": 0.7209,
"step": 1570
},
{
"epoch": 1.02,
"learning_rate": 0.00024214983713355046,
"loss": 0.7333,
"step": 1580
},
{
"epoch": 1.02,
"learning_rate": 0.0002417589576547231,
"loss": 0.7179,
"step": 1590
},
{
"epoch": 1.03,
"learning_rate": 0.00024136807817589574,
"loss": 0.7051,
"step": 1600
},
{
"epoch": 1.03,
"eval_loss": 0.7355972528457642,
"eval_runtime": 304.1098,
"eval_samples_per_second": 6.577,
"eval_steps_per_second": 0.822,
"step": 1600
},
{
"epoch": 1.04,
"learning_rate": 0.0002409771986970684,
"loss": 0.6873,
"step": 1610
},
{
"epoch": 1.04,
"learning_rate": 0.000240586319218241,
"loss": 0.7022,
"step": 1620
},
{
"epoch": 1.05,
"learning_rate": 0.00024019543973941365,
"loss": 0.7307,
"step": 1630
},
{
"epoch": 1.05,
"learning_rate": 0.0002398045602605863,
"loss": 0.7194,
"step": 1640
},
{
"epoch": 1.06,
"learning_rate": 0.00023941368078175893,
"loss": 0.7111,
"step": 1650
},
{
"epoch": 1.07,
"learning_rate": 0.0002390228013029316,
"loss": 0.7039,
"step": 1660
},
{
"epoch": 1.07,
"learning_rate": 0.0002386319218241042,
"loss": 0.7046,
"step": 1670
},
{
"epoch": 1.08,
"learning_rate": 0.00023824104234527685,
"loss": 0.6964,
"step": 1680
},
{
"epoch": 1.09,
"learning_rate": 0.0002378501628664495,
"loss": 0.713,
"step": 1690
},
{
"epoch": 1.09,
"learning_rate": 0.00023745928338762213,
"loss": 0.6813,
"step": 1700
},
{
"epoch": 1.1,
"learning_rate": 0.00023706840390879478,
"loss": 0.7454,
"step": 1710
},
{
"epoch": 1.11,
"learning_rate": 0.00023667752442996739,
"loss": 0.7051,
"step": 1720
},
{
"epoch": 1.11,
"learning_rate": 0.00023628664495114004,
"loss": 0.7157,
"step": 1730
},
{
"epoch": 1.12,
"learning_rate": 0.00023589576547231267,
"loss": 0.7034,
"step": 1740
},
{
"epoch": 1.13,
"learning_rate": 0.00023550488599348532,
"loss": 0.6927,
"step": 1750
},
{
"epoch": 1.13,
"learning_rate": 0.00023511400651465798,
"loss": 0.7141,
"step": 1760
},
{
"epoch": 1.14,
"learning_rate": 0.00023472312703583058,
"loss": 0.7061,
"step": 1770
},
{
"epoch": 1.14,
"learning_rate": 0.00023433224755700324,
"loss": 0.7006,
"step": 1780
},
{
"epoch": 1.15,
"learning_rate": 0.00023394136807817586,
"loss": 0.6999,
"step": 1790
},
{
"epoch": 1.16,
"learning_rate": 0.00023355048859934852,
"loss": 0.6925,
"step": 1800
},
{
"epoch": 1.16,
"eval_loss": 0.7322565913200378,
"eval_runtime": 304.7894,
"eval_samples_per_second": 6.562,
"eval_steps_per_second": 0.82,
"step": 1800
},
{
"epoch": 1.16,
"learning_rate": 0.00023315960912052117,
"loss": 0.7099,
"step": 1810
},
{
"epoch": 1.17,
"learning_rate": 0.00023276872964169378,
"loss": 0.7004,
"step": 1820
},
{
"epoch": 1.18,
"learning_rate": 0.00023237785016286643,
"loss": 0.7131,
"step": 1830
},
{
"epoch": 1.18,
"learning_rate": 0.00023198697068403906,
"loss": 0.7261,
"step": 1840
},
{
"epoch": 1.19,
"learning_rate": 0.00023159609120521171,
"loss": 0.7246,
"step": 1850
},
{
"epoch": 1.2,
"learning_rate": 0.00023120521172638434,
"loss": 0.6893,
"step": 1860
},
{
"epoch": 1.2,
"learning_rate": 0.000230814332247557,
"loss": 0.7016,
"step": 1870
},
{
"epoch": 1.21,
"learning_rate": 0.0002304234527687296,
"loss": 0.7046,
"step": 1880
},
{
"epoch": 1.22,
"learning_rate": 0.00023003257328990225,
"loss": 0.7215,
"step": 1890
},
{
"epoch": 1.22,
"learning_rate": 0.0002296416938110749,
"loss": 0.6976,
"step": 1900
},
{
"epoch": 1.23,
"learning_rate": 0.00022925081433224754,
"loss": 0.7077,
"step": 1910
},
{
"epoch": 1.23,
"learning_rate": 0.0002288599348534202,
"loss": 0.7022,
"step": 1920
},
{
"epoch": 1.24,
"learning_rate": 0.0002284690553745928,
"loss": 0.7158,
"step": 1930
},
{
"epoch": 1.25,
"learning_rate": 0.00022807817589576545,
"loss": 0.7186,
"step": 1940
},
{
"epoch": 1.25,
"learning_rate": 0.0002276872964169381,
"loss": 0.7046,
"step": 1950
},
{
"epoch": 1.26,
"learning_rate": 0.00022729641693811073,
"loss": 0.729,
"step": 1960
},
{
"epoch": 1.27,
"learning_rate": 0.0002269055374592834,
"loss": 0.6897,
"step": 1970
},
{
"epoch": 1.27,
"learning_rate": 0.000226514657980456,
"loss": 0.6949,
"step": 1980
},
{
"epoch": 1.28,
"learning_rate": 0.00022612377850162864,
"loss": 0.6945,
"step": 1990
},
{
"epoch": 1.29,
"learning_rate": 0.00022573289902280127,
"loss": 0.7178,
"step": 2000
},
{
"epoch": 1.29,
"eval_loss": 0.7295793294906616,
"eval_runtime": 305.3011,
"eval_samples_per_second": 6.551,
"eval_steps_per_second": 0.819,
"step": 2000
},
{
"epoch": 1.29,
"learning_rate": 0.00022534201954397393,
"loss": 0.711,
"step": 2010
},
{
"epoch": 1.3,
"learning_rate": 0.00022495114006514658,
"loss": 0.74,
"step": 2020
},
{
"epoch": 1.31,
"learning_rate": 0.00022456026058631918,
"loss": 0.6866,
"step": 2030
},
{
"epoch": 1.31,
"learning_rate": 0.00022416938110749184,
"loss": 0.7241,
"step": 2040
},
{
"epoch": 1.32,
"learning_rate": 0.00022377850162866447,
"loss": 0.6936,
"step": 2050
},
{
"epoch": 1.32,
"learning_rate": 0.00022338762214983712,
"loss": 0.7024,
"step": 2060
},
{
"epoch": 1.33,
"learning_rate": 0.00022299674267100978,
"loss": 0.7133,
"step": 2070
},
{
"epoch": 1.34,
"learning_rate": 0.00022260586319218238,
"loss": 0.7113,
"step": 2080
},
{
"epoch": 1.34,
"learning_rate": 0.00022221498371335503,
"loss": 0.7125,
"step": 2090
},
{
"epoch": 1.35,
"learning_rate": 0.00022182410423452766,
"loss": 0.7047,
"step": 2100
},
{
"epoch": 1.36,
"learning_rate": 0.00022143322475570032,
"loss": 0.7145,
"step": 2110
},
{
"epoch": 1.36,
"learning_rate": 0.00022104234527687295,
"loss": 0.7048,
"step": 2120
},
{
"epoch": 1.37,
"learning_rate": 0.00022065146579804557,
"loss": 0.7088,
"step": 2130
},
{
"epoch": 1.38,
"learning_rate": 0.0002202605863192182,
"loss": 0.6722,
"step": 2140
},
{
"epoch": 1.38,
"learning_rate": 0.00021986970684039086,
"loss": 0.7172,
"step": 2150
},
{
"epoch": 1.39,
"learning_rate": 0.0002194788273615635,
"loss": 0.7084,
"step": 2160
},
{
"epoch": 1.4,
"learning_rate": 0.00021908794788273614,
"loss": 0.7115,
"step": 2170
},
{
"epoch": 1.4,
"learning_rate": 0.00021869706840390877,
"loss": 0.711,
"step": 2180
},
{
"epoch": 1.41,
"learning_rate": 0.0002183061889250814,
"loss": 0.709,
"step": 2190
},
{
"epoch": 1.41,
"learning_rate": 0.00021791530944625405,
"loss": 0.7009,
"step": 2200
},
{
"epoch": 1.41,
"eval_loss": 0.7283578515052795,
"eval_runtime": 304.8952,
"eval_samples_per_second": 6.56,
"eval_steps_per_second": 0.82,
"step": 2200
},
{
"epoch": 1.42,
"learning_rate": 0.0002175244299674267,
"loss": 0.689,
"step": 2210
},
{
"epoch": 1.43,
"learning_rate": 0.00021713355048859934,
"loss": 0.7053,
"step": 2220
},
{
"epoch": 1.43,
"learning_rate": 0.000216742671009772,
"loss": 0.7083,
"step": 2230
},
{
"epoch": 1.44,
"learning_rate": 0.0002163517915309446,
"loss": 0.7243,
"step": 2240
},
{
"epoch": 1.45,
"learning_rate": 0.00021596091205211725,
"loss": 0.7015,
"step": 2250
},
{
"epoch": 1.45,
"learning_rate": 0.00021557003257328987,
"loss": 0.7003,
"step": 2260
},
{
"epoch": 1.46,
"learning_rate": 0.00021517915309446253,
"loss": 0.7082,
"step": 2270
},
{
"epoch": 1.47,
"learning_rate": 0.00021478827361563519,
"loss": 0.6813,
"step": 2280
},
{
"epoch": 1.47,
"learning_rate": 0.00021439739413680779,
"loss": 0.6653,
"step": 2290
},
{
"epoch": 1.48,
"learning_rate": 0.00021400651465798044,
"loss": 0.6942,
"step": 2300
},
{
"epoch": 1.49,
"learning_rate": 0.00021361563517915307,
"loss": 0.6831,
"step": 2310
},
{
"epoch": 1.49,
"learning_rate": 0.00021322475570032572,
"loss": 0.6975,
"step": 2320
},
{
"epoch": 1.5,
"learning_rate": 0.00021283387622149838,
"loss": 0.7188,
"step": 2330
},
{
"epoch": 1.5,
"learning_rate": 0.00021244299674267098,
"loss": 0.7015,
"step": 2340
},
{
"epoch": 1.51,
"learning_rate": 0.00021205211726384364,
"loss": 0.7035,
"step": 2350
},
{
"epoch": 1.52,
"learning_rate": 0.00021166123778501626,
"loss": 0.6914,
"step": 2360
},
{
"epoch": 1.52,
"learning_rate": 0.00021127035830618892,
"loss": 0.7002,
"step": 2370
},
{
"epoch": 1.53,
"learning_rate": 0.00021087947882736155,
"loss": 0.7263,
"step": 2380
},
{
"epoch": 1.54,
"learning_rate": 0.00021048859934853418,
"loss": 0.6828,
"step": 2390
},
{
"epoch": 1.54,
"learning_rate": 0.0002100977198697068,
"loss": 0.6953,
"step": 2400
},
{
"epoch": 1.54,
"eval_loss": 0.7254114151000977,
"eval_runtime": 305.3837,
"eval_samples_per_second": 6.549,
"eval_steps_per_second": 0.819,
"step": 2400
},
{
"epoch": 1.55,
"learning_rate": 0.00020970684039087946,
"loss": 0.6916,
"step": 2410
},
{
"epoch": 1.56,
"learning_rate": 0.00020931596091205211,
"loss": 0.6929,
"step": 2420
},
{
"epoch": 1.56,
"learning_rate": 0.00020892508143322474,
"loss": 0.6917,
"step": 2430
},
{
"epoch": 1.57,
"learning_rate": 0.00020853420195439737,
"loss": 0.7093,
"step": 2440
},
{
"epoch": 1.58,
"learning_rate": 0.00020814332247557,
"loss": 0.6587,
"step": 2450
},
{
"epoch": 1.58,
"learning_rate": 0.00020775244299674265,
"loss": 0.6849,
"step": 2460
},
{
"epoch": 1.59,
"learning_rate": 0.0002073615635179153,
"loss": 0.6868,
"step": 2470
},
{
"epoch": 1.59,
"learning_rate": 0.00020697068403908794,
"loss": 0.7033,
"step": 2480
},
{
"epoch": 1.6,
"learning_rate": 0.00020657980456026057,
"loss": 0.6981,
"step": 2490
},
{
"epoch": 1.61,
"learning_rate": 0.0002061889250814332,
"loss": 0.7093,
"step": 2500
},
{
"epoch": 1.61,
"learning_rate": 0.00020579804560260585,
"loss": 0.7087,
"step": 2510
},
{
"epoch": 1.62,
"learning_rate": 0.00020540716612377848,
"loss": 0.6819,
"step": 2520
},
{
"epoch": 1.63,
"learning_rate": 0.00020501628664495113,
"loss": 0.6874,
"step": 2530
},
{
"epoch": 1.63,
"learning_rate": 0.00020462540716612373,
"loss": 0.6895,
"step": 2540
},
{
"epoch": 1.64,
"learning_rate": 0.0002042345276872964,
"loss": 0.6954,
"step": 2550
},
{
"epoch": 1.65,
"learning_rate": 0.00020384364820846904,
"loss": 0.6676,
"step": 2560
},
{
"epoch": 1.65,
"learning_rate": 0.00020345276872964167,
"loss": 0.7144,
"step": 2570
},
{
"epoch": 1.66,
"learning_rate": 0.00020306188925081433,
"loss": 0.7194,
"step": 2580
},
{
"epoch": 1.67,
"learning_rate": 0.00020267100977198693,
"loss": 0.7016,
"step": 2590
},
{
"epoch": 1.67,
"learning_rate": 0.00020228013029315958,
"loss": 0.6849,
"step": 2600
},
{
"epoch": 1.67,
"eval_loss": 0.7234678864479065,
"eval_runtime": 305.414,
"eval_samples_per_second": 6.548,
"eval_steps_per_second": 0.819,
"step": 2600
},
{
"epoch": 1.68,
"learning_rate": 0.00020188925081433224,
"loss": 0.6993,
"step": 2610
},
{
"epoch": 1.68,
"learning_rate": 0.00020149837133550487,
"loss": 0.7068,
"step": 2620
},
{
"epoch": 1.69,
"learning_rate": 0.00020110749185667752,
"loss": 0.7013,
"step": 2630
},
{
"epoch": 1.7,
"learning_rate": 0.00020071661237785015,
"loss": 0.7064,
"step": 2640
},
{
"epoch": 1.7,
"learning_rate": 0.00020032573289902278,
"loss": 0.6916,
"step": 2650
},
{
"epoch": 1.71,
"learning_rate": 0.0001999348534201954,
"loss": 0.6717,
"step": 2660
},
{
"epoch": 1.72,
"learning_rate": 0.00019954397394136806,
"loss": 0.6969,
"step": 2670
},
{
"epoch": 1.72,
"learning_rate": 0.00019915309446254072,
"loss": 0.685,
"step": 2680
},
{
"epoch": 1.73,
"learning_rate": 0.00019876221498371335,
"loss": 0.7093,
"step": 2690
},
{
"epoch": 1.74,
"learning_rate": 0.00019837133550488597,
"loss": 0.6936,
"step": 2700
},
{
"epoch": 1.74,
"learning_rate": 0.0001979804560260586,
"loss": 0.6898,
"step": 2710
},
{
"epoch": 1.75,
"learning_rate": 0.00019758957654723126,
"loss": 0.6914,
"step": 2720
},
{
"epoch": 1.76,
"learning_rate": 0.0001971986970684039,
"loss": 0.6863,
"step": 2730
},
{
"epoch": 1.76,
"learning_rate": 0.00019680781758957654,
"loss": 0.6974,
"step": 2740
},
{
"epoch": 1.77,
"learning_rate": 0.00019641693811074917,
"loss": 0.7073,
"step": 2750
},
{
"epoch": 1.77,
"learning_rate": 0.0001960260586319218,
"loss": 0.6874,
"step": 2760
},
{
"epoch": 1.78,
"learning_rate": 0.00019563517915309445,
"loss": 0.6847,
"step": 2770
},
{
"epoch": 1.79,
"learning_rate": 0.00019524429967426708,
"loss": 0.6989,
"step": 2780
},
{
"epoch": 1.79,
"learning_rate": 0.00019485342019543974,
"loss": 0.6953,
"step": 2790
},
{
"epoch": 1.8,
"learning_rate": 0.00019446254071661234,
"loss": 0.6855,
"step": 2800
},
{
"epoch": 1.8,
"eval_loss": 0.7221594452857971,
"eval_runtime": 304.2851,
"eval_samples_per_second": 6.573,
"eval_steps_per_second": 0.822,
"step": 2800
},
{
"epoch": 1.81,
"learning_rate": 0.000194071661237785,
"loss": 0.7001,
"step": 2810
},
{
"epoch": 1.81,
"learning_rate": 0.00019368078175895765,
"loss": 0.7271,
"step": 2820
},
{
"epoch": 1.82,
"learning_rate": 0.00019328990228013028,
"loss": 0.6892,
"step": 2830
},
{
"epoch": 1.83,
"learning_rate": 0.00019289902280130293,
"loss": 0.7021,
"step": 2840
},
{
"epoch": 1.83,
"learning_rate": 0.00019250814332247553,
"loss": 0.713,
"step": 2850
},
{
"epoch": 1.84,
"learning_rate": 0.0001921172638436482,
"loss": 0.7011,
"step": 2860
},
{
"epoch": 1.85,
"learning_rate": 0.00019172638436482084,
"loss": 0.6932,
"step": 2870
},
{
"epoch": 1.85,
"learning_rate": 0.00019133550488599347,
"loss": 0.6895,
"step": 2880
},
{
"epoch": 1.86,
"learning_rate": 0.00019094462540716613,
"loss": 0.6971,
"step": 2890
},
{
"epoch": 1.86,
"learning_rate": 0.00019055374592833873,
"loss": 0.6949,
"step": 2900
},
{
"epoch": 1.87,
"learning_rate": 0.00019016286644951138,
"loss": 0.6767,
"step": 2910
},
{
"epoch": 1.88,
"learning_rate": 0.000189771986970684,
"loss": 0.693,
"step": 2920
},
{
"epoch": 1.88,
"learning_rate": 0.00018938110749185666,
"loss": 0.7048,
"step": 2930
},
{
"epoch": 1.89,
"learning_rate": 0.00018899022801302932,
"loss": 0.7007,
"step": 2940
},
{
"epoch": 1.9,
"learning_rate": 0.00018859934853420192,
"loss": 0.6677,
"step": 2950
},
{
"epoch": 1.9,
"learning_rate": 0.00018820846905537458,
"loss": 0.7071,
"step": 2960
},
{
"epoch": 1.91,
"learning_rate": 0.0001878175895765472,
"loss": 0.7014,
"step": 2970
},
{
"epoch": 1.92,
"learning_rate": 0.00018742671009771986,
"loss": 0.7017,
"step": 2980
},
{
"epoch": 1.92,
"learning_rate": 0.00018703583061889252,
"loss": 0.6962,
"step": 2990
},
{
"epoch": 1.93,
"learning_rate": 0.00018664495114006514,
"loss": 0.6785,
"step": 3000
},
{
"epoch": 1.93,
"eval_loss": 0.7213538289070129,
"eval_runtime": 304.1305,
"eval_samples_per_second": 6.576,
"eval_steps_per_second": 0.822,
"step": 3000
},
{
"epoch": 1.94,
"learning_rate": 0.00018625407166123777,
"loss": 0.68,
"step": 3010
},
{
"epoch": 1.94,
"learning_rate": 0.0001858631921824104,
"loss": 0.6839,
"step": 3020
},
{
"epoch": 1.95,
"learning_rate": 0.00018547231270358305,
"loss": 0.6739,
"step": 3030
},
{
"epoch": 1.95,
"learning_rate": 0.00018508143322475568,
"loss": 0.6944,
"step": 3040
},
{
"epoch": 1.96,
"learning_rate": 0.00018469055374592834,
"loss": 0.7065,
"step": 3050
},
{
"epoch": 1.97,
"learning_rate": 0.00018429967426710094,
"loss": 0.7103,
"step": 3060
},
{
"epoch": 1.97,
"learning_rate": 0.0001839087947882736,
"loss": 0.7001,
"step": 3070
},
{
"epoch": 1.98,
"learning_rate": 0.00018351791530944625,
"loss": 0.6892,
"step": 3080
},
{
"epoch": 1.99,
"learning_rate": 0.00018312703583061888,
"loss": 0.7009,
"step": 3090
},
{
"epoch": 1.99,
"learning_rate": 0.00018273615635179153,
"loss": 0.714,
"step": 3100
},
{
"epoch": 2.0,
"learning_rate": 0.00018234527687296413,
"loss": 0.7133,
"step": 3110
},
{
"epoch": 2.01,
"learning_rate": 0.0001819543973941368,
"loss": 0.6702,
"step": 3120
},
{
"epoch": 2.01,
"learning_rate": 0.00018156351791530944,
"loss": 0.6416,
"step": 3130
},
{
"epoch": 2.02,
"learning_rate": 0.00018117263843648207,
"loss": 0.6654,
"step": 3140
},
{
"epoch": 2.03,
"learning_rate": 0.00018078175895765473,
"loss": 0.6678,
"step": 3150
},
{
"epoch": 2.03,
"learning_rate": 0.00018039087947882733,
"loss": 0.6657,
"step": 3160
},
{
"epoch": 2.04,
"learning_rate": 0.00017999999999999998,
"loss": 0.6588,
"step": 3170
},
{
"epoch": 2.05,
"learning_rate": 0.0001796091205211726,
"loss": 0.6545,
"step": 3180
},
{
"epoch": 2.05,
"learning_rate": 0.00017921824104234527,
"loss": 0.6266,
"step": 3190
},
{
"epoch": 2.06,
"learning_rate": 0.00017882736156351792,
"loss": 0.6608,
"step": 3200
},
{
"epoch": 2.06,
"eval_loss": 0.7244688868522644,
"eval_runtime": 301.8449,
"eval_samples_per_second": 6.626,
"eval_steps_per_second": 0.828,
"step": 3200
},
{
"epoch": 2.06,
"learning_rate": 0.00017843648208469052,
"loss": 0.6472,
"step": 3210
},
{
"epoch": 2.07,
"learning_rate": 0.00017804560260586318,
"loss": 0.6375,
"step": 3220
},
{
"epoch": 2.08,
"learning_rate": 0.0001776547231270358,
"loss": 0.6715,
"step": 3230
},
{
"epoch": 2.08,
"learning_rate": 0.00017726384364820846,
"loss": 0.6541,
"step": 3240
},
{
"epoch": 2.09,
"learning_rate": 0.0001768729641693811,
"loss": 0.6707,
"step": 3250
},
{
"epoch": 2.1,
"learning_rate": 0.00017648208469055372,
"loss": 0.6634,
"step": 3260
},
{
"epoch": 2.1,
"learning_rate": 0.00017609120521172637,
"loss": 0.6558,
"step": 3270
},
{
"epoch": 2.11,
"learning_rate": 0.000175700325732899,
"loss": 0.6741,
"step": 3280
},
{
"epoch": 2.12,
"learning_rate": 0.00017530944625407166,
"loss": 0.6454,
"step": 3290
},
{
"epoch": 2.12,
"learning_rate": 0.00017491856677524429,
"loss": 0.6619,
"step": 3300
},
{
"epoch": 2.13,
"learning_rate": 0.00017452768729641691,
"loss": 0.6417,
"step": 3310
},
{
"epoch": 2.14,
"learning_rate": 0.00017413680781758954,
"loss": 0.6485,
"step": 3320
},
{
"epoch": 2.14,
"learning_rate": 0.0001737459283387622,
"loss": 0.6326,
"step": 3330
},
{
"epoch": 2.15,
"learning_rate": 0.00017335504885993485,
"loss": 0.663,
"step": 3340
},
{
"epoch": 2.15,
"learning_rate": 0.00017296416938110748,
"loss": 0.6529,
"step": 3350
},
{
"epoch": 2.16,
"learning_rate": 0.0001725732899022801,
"loss": 0.6619,
"step": 3360
},
{
"epoch": 2.17,
"learning_rate": 0.00017218241042345274,
"loss": 0.6553,
"step": 3370
},
{
"epoch": 2.17,
"learning_rate": 0.0001717915309446254,
"loss": 0.6514,
"step": 3380
},
{
"epoch": 2.18,
"learning_rate": 0.00017140065146579802,
"loss": 0.6634,
"step": 3390
},
{
"epoch": 2.19,
"learning_rate": 0.00017100977198697068,
"loss": 0.6438,
"step": 3400
},
{
"epoch": 2.19,
"eval_loss": 0.7247596979141235,
"eval_runtime": 304.03,
"eval_samples_per_second": 6.578,
"eval_steps_per_second": 0.822,
"step": 3400
},
{
"epoch": 2.19,
"learning_rate": 0.00017061889250814333,
"loss": 0.6592,
"step": 3410
},
{
"epoch": 2.2,
"learning_rate": 0.00017022801302931593,
"loss": 0.6478,
"step": 3420
},
{
"epoch": 2.21,
"learning_rate": 0.0001698371335504886,
"loss": 0.6475,
"step": 3430
},
{
"epoch": 2.21,
"learning_rate": 0.00016944625407166122,
"loss": 0.6559,
"step": 3440
},
{
"epoch": 2.22,
"learning_rate": 0.00016905537459283387,
"loss": 0.6635,
"step": 3450
},
{
"epoch": 2.23,
"learning_rate": 0.00016866449511400653,
"loss": 0.667,
"step": 3460
},
{
"epoch": 2.23,
"learning_rate": 0.00016827361563517913,
"loss": 0.6781,
"step": 3470
},
{
"epoch": 2.24,
"learning_rate": 0.00016788273615635178,
"loss": 0.6446,
"step": 3480
},
{
"epoch": 2.24,
"learning_rate": 0.0001674918566775244,
"loss": 0.6617,
"step": 3490
},
{
"epoch": 2.25,
"learning_rate": 0.00016710097719869707,
"loss": 0.6497,
"step": 3500
},
{
"epoch": 2.26,
"learning_rate": 0.0001667100977198697,
"loss": 0.6473,
"step": 3510
},
{
"epoch": 2.26,
"learning_rate": 0.00016631921824104232,
"loss": 0.6497,
"step": 3520
},
{
"epoch": 2.27,
"learning_rate": 0.00016592833876221495,
"loss": 0.6599,
"step": 3530
},
{
"epoch": 2.28,
"learning_rate": 0.0001655374592833876,
"loss": 0.6581,
"step": 3540
},
{
"epoch": 2.28,
"learning_rate": 0.00016514657980456026,
"loss": 0.6265,
"step": 3550
},
{
"epoch": 2.29,
"learning_rate": 0.0001647557003257329,
"loss": 0.6648,
"step": 3560
},
{
"epoch": 2.3,
"learning_rate": 0.00016436482084690552,
"loss": 0.6568,
"step": 3570
},
{
"epoch": 2.3,
"learning_rate": 0.00016397394136807814,
"loss": 0.6629,
"step": 3580
},
{
"epoch": 2.31,
"learning_rate": 0.0001635830618892508,
"loss": 0.6526,
"step": 3590
},
{
"epoch": 2.32,
"learning_rate": 0.00016319218241042346,
"loss": 0.6362,
"step": 3600
},
{
"epoch": 2.32,
"eval_loss": 0.7260497808456421,
"eval_runtime": 304.4517,
"eval_samples_per_second": 6.569,
"eval_steps_per_second": 0.821,
"step": 3600
},
{
"epoch": 2.32,
"learning_rate": 0.00016280130293159608,
"loss": 0.6525,
"step": 3610
},
{
"epoch": 2.33,
"learning_rate": 0.0001624104234527687,
"loss": 0.6389,
"step": 3620
},
{
"epoch": 2.33,
"learning_rate": 0.00016201954397394134,
"loss": 0.6672,
"step": 3630
},
{
"epoch": 2.34,
"learning_rate": 0.000161628664495114,
"loss": 0.686,
"step": 3640
},
{
"epoch": 2.35,
"learning_rate": 0.00016123778501628662,
"loss": 0.6508,
"step": 3650
},
{
"epoch": 2.35,
"learning_rate": 0.00016084690553745928,
"loss": 0.6449,
"step": 3660
},
{
"epoch": 2.36,
"learning_rate": 0.00016045602605863188,
"loss": 0.6482,
"step": 3670
},
{
"epoch": 2.37,
"learning_rate": 0.00016006514657980453,
"loss": 0.6607,
"step": 3680
},
{
"epoch": 2.37,
"learning_rate": 0.0001596742671009772,
"loss": 0.6656,
"step": 3690
},
{
"epoch": 2.38,
"learning_rate": 0.00015928338762214982,
"loss": 0.6624,
"step": 3700
},
{
"epoch": 2.39,
"learning_rate": 0.00015889250814332247,
"loss": 0.6436,
"step": 3710
},
{
"epoch": 2.39,
"learning_rate": 0.00015850162866449507,
"loss": 0.6792,
"step": 3720
},
{
"epoch": 2.4,
"learning_rate": 0.00015811074918566773,
"loss": 0.6549,
"step": 3730
},
{
"epoch": 2.41,
"learning_rate": 0.00015771986970684038,
"loss": 0.655,
"step": 3740
},
{
"epoch": 2.41,
"learning_rate": 0.000157328990228013,
"loss": 0.6468,
"step": 3750
},
{
"epoch": 2.42,
"learning_rate": 0.00015693811074918567,
"loss": 0.6489,
"step": 3760
},
{
"epoch": 2.42,
"learning_rate": 0.0001565472312703583,
"loss": 0.6587,
"step": 3770
},
{
"epoch": 2.43,
"learning_rate": 0.00015615635179153092,
"loss": 0.6506,
"step": 3780
},
{
"epoch": 2.44,
"learning_rate": 0.00015576547231270355,
"loss": 0.6525,
"step": 3790
},
{
"epoch": 2.44,
"learning_rate": 0.0001553745928338762,
"loss": 0.6554,
"step": 3800
},
{
"epoch": 2.44,
"eval_loss": 0.7240291833877563,
"eval_runtime": 304.5938,
"eval_samples_per_second": 6.566,
"eval_steps_per_second": 0.821,
"step": 3800
},
{
"epoch": 2.45,
"learning_rate": 0.00015498371335504886,
"loss": 0.6404,
"step": 3810
},
{
"epoch": 2.46,
"learning_rate": 0.0001545928338762215,
"loss": 0.6856,
"step": 3820
},
{
"epoch": 2.46,
"learning_rate": 0.00015420195439739412,
"loss": 0.6508,
"step": 3830
},
{
"epoch": 2.47,
"learning_rate": 0.00015381107491856675,
"loss": 0.6485,
"step": 3840
},
{
"epoch": 2.48,
"learning_rate": 0.0001534201954397394,
"loss": 0.626,
"step": 3850
},
{
"epoch": 2.48,
"learning_rate": 0.00015302931596091206,
"loss": 0.6595,
"step": 3860
},
{
"epoch": 2.49,
"learning_rate": 0.00015263843648208469,
"loss": 0.6737,
"step": 3870
},
{
"epoch": 2.5,
"learning_rate": 0.00015224755700325731,
"loss": 0.634,
"step": 3880
},
{
"epoch": 2.5,
"learning_rate": 0.00015185667752442994,
"loss": 0.6691,
"step": 3890
},
{
"epoch": 2.51,
"learning_rate": 0.0001514657980456026,
"loss": 0.6507,
"step": 3900
},
{
"epoch": 2.51,
"learning_rate": 0.00015107491856677523,
"loss": 0.6438,
"step": 3910
},
{
"epoch": 2.52,
"learning_rate": 0.00015068403908794788,
"loss": 0.6553,
"step": 3920
},
{
"epoch": 2.53,
"learning_rate": 0.00015029315960912048,
"loss": 0.6753,
"step": 3930
},
{
"epoch": 2.53,
"learning_rate": 0.00014990228013029314,
"loss": 0.6727,
"step": 3940
},
{
"epoch": 2.54,
"learning_rate": 0.0001495114006514658,
"loss": 0.6462,
"step": 3950
},
{
"epoch": 2.55,
"learning_rate": 0.00014912052117263842,
"loss": 0.6605,
"step": 3960
},
{
"epoch": 2.55,
"learning_rate": 0.00014872964169381105,
"loss": 0.651,
"step": 3970
},
{
"epoch": 2.56,
"learning_rate": 0.0001483387622149837,
"loss": 0.668,
"step": 3980
},
{
"epoch": 2.57,
"learning_rate": 0.00014794788273615633,
"loss": 0.6631,
"step": 3990
},
{
"epoch": 2.57,
"learning_rate": 0.000147557003257329,
"loss": 0.6676,
"step": 4000
},
{
"epoch": 2.57,
"eval_loss": 0.7227743268013,
"eval_runtime": 305.0392,
"eval_samples_per_second": 6.557,
"eval_steps_per_second": 0.82,
"step": 4000
},
{
"epoch": 2.58,
"learning_rate": 0.00014716612377850162,
"loss": 0.6438,
"step": 4010
},
{
"epoch": 2.59,
"learning_rate": 0.00014677524429967424,
"loss": 0.6503,
"step": 4020
},
{
"epoch": 2.59,
"learning_rate": 0.0001463843648208469,
"loss": 0.6534,
"step": 4030
},
{
"epoch": 2.6,
"learning_rate": 0.00014599348534201953,
"loss": 0.6497,
"step": 4040
},
{
"epoch": 2.6,
"learning_rate": 0.00014560260586319216,
"loss": 0.6633,
"step": 4050
},
{
"epoch": 2.61,
"learning_rate": 0.0001452117263843648,
"loss": 0.6473,
"step": 4060
},
{
"epoch": 2.62,
"learning_rate": 0.00014482084690553744,
"loss": 0.6532,
"step": 4070
},
{
"epoch": 2.62,
"learning_rate": 0.0001444299674267101,
"loss": 0.6687,
"step": 4080
},
{
"epoch": 2.63,
"learning_rate": 0.00014403908794788272,
"loss": 0.6582,
"step": 4090
},
{
"epoch": 2.64,
"learning_rate": 0.00014364820846905535,
"loss": 0.6498,
"step": 4100
},
{
"epoch": 2.64,
"learning_rate": 0.000143257328990228,
"loss": 0.6665,
"step": 4110
},
{
"epoch": 2.65,
"learning_rate": 0.00014286644951140063,
"loss": 0.6519,
"step": 4120
},
{
"epoch": 2.66,
"learning_rate": 0.0001424755700325733,
"loss": 0.6597,
"step": 4130
},
{
"epoch": 2.66,
"learning_rate": 0.00014208469055374592,
"loss": 0.6585,
"step": 4140
},
{
"epoch": 2.67,
"learning_rate": 0.00014169381107491854,
"loss": 0.6614,
"step": 4150
},
{
"epoch": 2.68,
"learning_rate": 0.0001413029315960912,
"loss": 0.641,
"step": 4160
},
{
"epoch": 2.68,
"learning_rate": 0.00014091205211726383,
"loss": 0.6417,
"step": 4170
},
{
"epoch": 2.69,
"learning_rate": 0.00014052117263843646,
"loss": 0.6844,
"step": 4180
},
{
"epoch": 2.69,
"learning_rate": 0.0001401302931596091,
"loss": 0.6464,
"step": 4190
},
{
"epoch": 2.7,
"learning_rate": 0.00013973941368078174,
"loss": 0.6689,
"step": 4200
},
{
"epoch": 2.7,
"eval_loss": 0.7206348776817322,
"eval_runtime": 302.1995,
"eval_samples_per_second": 6.618,
"eval_steps_per_second": 0.827,
"step": 4200
}
],
"logging_steps": 10,
"max_steps": 7775,
"num_train_epochs": 5,
"save_steps": 200,
"total_flos": 3.0362368185357926e+18,
"trial_name": null,
"trial_params": null
}