{ "best_metric": 1.486396074295044, "best_model_checkpoint": "saves/ChineseLLaMA2-7B-Chat/lora/2023-09-07-12-02-29/checkpoint-600", "epoch": 0.6502302898943376, "eval_steps": 100, "global_step": 600, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.01, "learning_rate": 0.0009999919374161553, "loss": 2.0025, "step": 5 }, { "epoch": 0.01, "learning_rate": 0.0009999677499246417, "loss": 1.7737, "step": 10 }, { "epoch": 0.02, "learning_rate": 0.0009999274383055143, "loss": 1.7391, "step": 15 }, { "epoch": 0.02, "learning_rate": 0.0009998710038588363, "loss": 1.7959, "step": 20 }, { "epoch": 0.03, "learning_rate": 0.0009997984484046375, "loss": 1.713, "step": 25 }, { "epoch": 0.03, "learning_rate": 0.0009997097742828556, "loss": 1.6441, "step": 30 }, { "epoch": 0.04, "learning_rate": 0.0009996049843532607, "loss": 1.704, "step": 35 }, { "epoch": 0.04, "learning_rate": 0.0009994840819953633, "loss": 1.6532, "step": 40 }, { "epoch": 0.05, "learning_rate": 0.0009993470711083048, "loss": 1.6791, "step": 45 }, { "epoch": 0.05, "learning_rate": 0.0009991939561107325, "loss": 1.6465, "step": 50 }, { "epoch": 0.06, "learning_rate": 0.000999024741940656, "loss": 1.6511, "step": 55 }, { "epoch": 0.07, "learning_rate": 0.0009988394340552898, "loss": 1.6727, "step": 60 }, { "epoch": 0.07, "learning_rate": 0.0009986380384308746, "loss": 1.6653, "step": 65 }, { "epoch": 0.08, "learning_rate": 0.0009984205615624873, "loss": 1.6339, "step": 70 }, { "epoch": 0.08, "learning_rate": 0.0009981870104638294, "loss": 1.5562, "step": 75 }, { "epoch": 0.09, "learning_rate": 0.0009979373926670028, "loss": 1.6291, "step": 80 }, { "epoch": 0.09, "learning_rate": 0.0009976717162222645, "loss": 1.625, "step": 85 }, { "epoch": 0.1, "learning_rate": 0.0009973899896977695, "loss": 1.6008, "step": 90 }, { "epoch": 0.1, "learning_rate": 0.000997092222179292, "loss": 1.6821, "step": 95 }, { "epoch": 0.11, "learning_rate": 0.0009967784232699352, "loss": 1.582, "step": 100 }, { "epoch": 0.11, "eval_loss": 1.6186352968215942, "eval_runtime": 10.6735, "eval_samples_per_second": 14.054, "eval_steps_per_second": 1.78, "step": 100 }, { "epoch": 0.11, "learning_rate": 0.0009964486030898186, "loss": 1.5769, "step": 105 }, { "epoch": 0.12, "learning_rate": 0.0009961027722757538, "loss": 1.5868, "step": 110 }, { "epoch": 0.12, "learning_rate": 0.0009957409419809006, "loss": 1.5601, "step": 115 }, { "epoch": 0.13, "learning_rate": 0.000995363123874407, "loss": 1.6061, "step": 120 }, { "epoch": 0.14, "learning_rate": 0.0009949693301410341, "loss": 1.6073, "step": 125 }, { "epoch": 0.14, "learning_rate": 0.0009945595734807615, "loss": 1.4998, "step": 130 }, { "epoch": 0.15, "learning_rate": 0.0009941338671083794, "loss": 1.5295, "step": 135 }, { "epoch": 0.15, "learning_rate": 0.0009936922247530606, "loss": 1.5418, "step": 140 }, { "epoch": 0.16, "learning_rate": 0.0009932346606579192, "loss": 1.554, "step": 145 }, { "epoch": 0.16, "learning_rate": 0.0009927611895795513, "loss": 1.5509, "step": 150 }, { "epoch": 0.17, "learning_rate": 0.0009922718267875571, "loss": 1.6123, "step": 155 }, { "epoch": 0.17, "learning_rate": 0.0009917665880640515, "loss": 1.6267, "step": 160 }, { "epoch": 0.18, "learning_rate": 0.0009912454897031524, "loss": 1.6116, "step": 165 }, { "epoch": 0.18, "learning_rate": 0.0009907085485104568, "loss": 1.5618, "step": 170 }, { "epoch": 0.19, "learning_rate": 0.0009901557818024981, "loss": 1.6085, "step": 175 }, { "epoch": 0.2, "learning_rate": 
0.0009895872074061885, "loss": 1.5829, "step": 180 }, { "epoch": 0.2, "learning_rate": 0.0009890028436582426, "loss": 1.5407, "step": 185 }, { "epoch": 0.21, "learning_rate": 0.0009884027094045871, "loss": 1.5568, "step": 190 }, { "epoch": 0.21, "learning_rate": 0.0009877868239997532, "loss": 1.5831, "step": 195 }, { "epoch": 0.22, "learning_rate": 0.0009871552073062516, "loss": 1.5231, "step": 200 }, { "epoch": 0.22, "eval_loss": 1.5717933177947998, "eval_runtime": 10.6708, "eval_samples_per_second": 14.057, "eval_steps_per_second": 1.781, "step": 200 }, { "epoch": 0.22, "learning_rate": 0.0009865078796939327, "loss": 1.5467, "step": 205 }, { "epoch": 0.23, "learning_rate": 0.000985844862039329, "loss": 1.6403, "step": 210 }, { "epoch": 0.23, "learning_rate": 0.0009851661757249823, "loss": 1.5352, "step": 215 }, { "epoch": 0.24, "learning_rate": 0.0009844718426387537, "loss": 1.5616, "step": 220 }, { "epoch": 0.24, "learning_rate": 0.000983761885173118, "loss": 1.5274, "step": 225 }, { "epoch": 0.25, "learning_rate": 0.000983036326224442, "loss": 1.6153, "step": 230 }, { "epoch": 0.25, "learning_rate": 0.0009822951891922448, "loss": 1.5062, "step": 235 }, { "epoch": 0.26, "learning_rate": 0.0009815384979784444, "loss": 1.6038, "step": 240 }, { "epoch": 0.27, "learning_rate": 0.000980766276986586, "loss": 1.5097, "step": 245 }, { "epoch": 0.27, "learning_rate": 0.0009799785511210557, "loss": 1.535, "step": 250 }, { "epoch": 0.28, "learning_rate": 0.000979175345786277, "loss": 1.52, "step": 255 }, { "epoch": 0.28, "learning_rate": 0.0009783566868858912, "loss": 1.5678, "step": 260 }, { "epoch": 0.29, "learning_rate": 0.0009775226008219224, "loss": 1.5536, "step": 265 }, { "epoch": 0.29, "learning_rate": 0.0009766731144939258, "loss": 1.4826, "step": 270 }, { "epoch": 0.3, "learning_rate": 0.0009758082552981204, "loss": 1.5537, "step": 275 }, { "epoch": 0.3, "learning_rate": 0.0009749280511265056, "loss": 1.5277, "step": 280 }, { "epoch": 0.31, "learning_rate": 0.0009740325303659609, "loss": 1.5445, "step": 285 }, { "epoch": 0.31, "learning_rate": 0.000973121721897331, "loss": 1.4944, "step": 290 }, { "epoch": 0.32, "learning_rate": 0.0009721956550944948, "loss": 1.5088, "step": 295 }, { "epoch": 0.33, "learning_rate": 0.0009712543598234172, "loss": 1.585, "step": 300 }, { "epoch": 0.33, "eval_loss": 1.5345921516418457, "eval_runtime": 10.6704, "eval_samples_per_second": 14.058, "eval_steps_per_second": 1.781, "step": 300 }, { "epoch": 0.33, "learning_rate": 0.0009702978664411863, "loss": 1.5427, "step": 305 }, { "epoch": 0.34, "learning_rate": 0.0009693262057950345, "loss": 1.4475, "step": 310 }, { "epoch": 0.34, "learning_rate": 0.0009683394092213436, "loss": 1.5321, "step": 315 }, { "epoch": 0.35, "learning_rate": 0.0009673375085446339, "loss": 1.5171, "step": 320 }, { "epoch": 0.35, "learning_rate": 0.0009663205360765382, "loss": 1.5198, "step": 325 }, { "epoch": 0.36, "learning_rate": 0.00096528852461476, "loss": 1.492, "step": 330 }, { "epoch": 0.36, "learning_rate": 0.0009642415074420146, "loss": 1.5036, "step": 335 }, { "epoch": 0.37, "learning_rate": 0.0009631795183249573, "loss": 1.5134, "step": 340 }, { "epoch": 0.37, "learning_rate": 0.0009621025915130932, "loss": 1.5568, "step": 345 }, { "epoch": 0.38, "learning_rate": 0.0009610107617376733, "loss": 1.503, "step": 350 }, { "epoch": 0.38, "learning_rate": 0.0009599040642105736, "loss": 1.4584, "step": 355 }, { "epoch": 0.39, "learning_rate": 0.000958782534623161, "loss": 1.4832, "step": 360 }, { "epoch": 0.4, "learning_rate": 
0.0009576462091451406, "loss": 1.4598, "step": 365 }, { "epoch": 0.4, "learning_rate": 0.0009564951244233901, "loss": 1.5492, "step": 370 }, { "epoch": 0.41, "learning_rate": 0.000955329317580778, "loss": 1.5145, "step": 375 }, { "epoch": 0.41, "learning_rate": 0.0009541488262149661, "loss": 1.589, "step": 380 }, { "epoch": 0.42, "learning_rate": 0.0009529536883971963, "loss": 1.6003, "step": 385 }, { "epoch": 0.42, "learning_rate": 0.0009517439426710646, "loss": 1.55, "step": 390 }, { "epoch": 0.43, "learning_rate": 0.0009505196280512762, "loss": 1.5359, "step": 395 }, { "epoch": 0.43, "learning_rate": 0.0009492807840223881, "loss": 1.4854, "step": 400 }, { "epoch": 0.43, "eval_loss": 1.5193477869033813, "eval_runtime": 10.6722, "eval_samples_per_second": 14.055, "eval_steps_per_second": 1.78, "step": 400 }, { "epoch": 0.44, "learning_rate": 0.0009480274505375358, "loss": 1.4891, "step": 405 }, { "epoch": 0.44, "learning_rate": 0.0009467596680171446, "loss": 1.4719, "step": 410 }, { "epoch": 0.45, "learning_rate": 0.0009454774773476257, "loss": 1.4939, "step": 415 }, { "epoch": 0.46, "learning_rate": 0.0009441809198800587, "loss": 1.4382, "step": 420 }, { "epoch": 0.46, "learning_rate": 0.0009428700374288564, "loss": 1.4427, "step": 425 }, { "epoch": 0.47, "learning_rate": 0.0009415448722704175, "loss": 1.4767, "step": 430 }, { "epoch": 0.47, "learning_rate": 0.0009402054671417628, "loss": 1.4799, "step": 435 }, { "epoch": 0.48, "learning_rate": 0.0009388518652391571, "loss": 1.4608, "step": 440 }, { "epoch": 0.48, "learning_rate": 0.0009374841102167157, "loss": 1.4937, "step": 445 }, { "epoch": 0.49, "learning_rate": 0.0009361022461849965, "loss": 1.5468, "step": 450 }, { "epoch": 0.49, "learning_rate": 0.0009347063177095783, "loss": 1.5481, "step": 455 }, { "epoch": 0.5, "learning_rate": 0.0009332963698096223, "loss": 1.4478, "step": 460 }, { "epoch": 0.5, "learning_rate": 0.0009318724479564215, "loss": 1.4977, "step": 465 }, { "epoch": 0.51, "learning_rate": 0.0009304345980719329, "loss": 1.5091, "step": 470 }, { "epoch": 0.51, "learning_rate": 0.0009289828665272977, "loss": 1.43, "step": 475 }, { "epoch": 0.52, "learning_rate": 0.0009275173001413448, "loss": 1.4725, "step": 480 }, { "epoch": 0.53, "learning_rate": 0.0009260379461790822, "loss": 1.3741, "step": 485 }, { "epoch": 0.53, "learning_rate": 0.0009245448523501708, "loss": 1.4917, "step": 490 }, { "epoch": 0.54, "learning_rate": 0.0009230380668073877, "loss": 1.4684, "step": 495 }, { "epoch": 0.54, "learning_rate": 0.0009215176381450717, "loss": 1.5209, "step": 500 }, { "epoch": 0.54, "eval_loss": 1.5050214529037476, "eval_runtime": 10.6706, "eval_samples_per_second": 14.057, "eval_steps_per_second": 1.781, "step": 500 }, { "epoch": 0.55, "learning_rate": 0.0009199836153975573, "loss": 1.4913, "step": 505 }, { "epoch": 0.55, "learning_rate": 0.0009184360480375926, "loss": 1.5377, "step": 510 }, { "epoch": 0.56, "learning_rate": 0.0009168749859747438, "loss": 1.4608, "step": 515 }, { "epoch": 0.56, "learning_rate": 0.0009153004795537861, "loss": 1.4738, "step": 520 }, { "epoch": 0.57, "learning_rate": 0.0009137125795530795, "loss": 1.4947, "step": 525 }, { "epoch": 0.57, "learning_rate": 0.0009121113371829318, "loss": 1.5267, "step": 530 }, { "epoch": 0.58, "learning_rate": 0.0009104968040839463, "loss": 1.5116, "step": 535 }, { "epoch": 0.59, "learning_rate": 0.000908869032325357, "loss": 1.4423, "step": 540 }, { "epoch": 0.59, "learning_rate": 0.000907228074403349, "loss": 1.4565, "step": 545 }, { "epoch": 0.6, 
"learning_rate": 0.0009055739832393655, "loss": 1.4923, "step": 550 }, { "epoch": 0.6, "learning_rate": 0.0009039068121784016, "loss": 1.4304, "step": 555 }, { "epoch": 0.61, "learning_rate": 0.0009022266149872829, "loss": 1.4422, "step": 560 }, { "epoch": 0.61, "learning_rate": 0.0009005334458529322, "loss": 1.522, "step": 565 }, { "epoch": 0.62, "learning_rate": 0.0008988273593806222, "loss": 1.499, "step": 570 }, { "epoch": 0.62, "learning_rate": 0.0008971084105922139, "loss": 1.4796, "step": 575 }, { "epoch": 0.63, "learning_rate": 0.0008953766549243818, "loss": 1.4231, "step": 580 }, { "epoch": 0.63, "learning_rate": 0.0008936321482268275, "loss": 1.462, "step": 585 }, { "epoch": 0.64, "learning_rate": 0.0008918749467604766, "loss": 1.5191, "step": 590 }, { "epoch": 0.64, "learning_rate": 0.0008901051071956661, "loss": 1.4845, "step": 595 }, { "epoch": 0.65, "learning_rate": 0.0008883226866103152, "loss": 1.4652, "step": 600 }, { "epoch": 0.65, "eval_loss": 1.486396074295044, "eval_runtime": 10.6718, "eval_samples_per_second": 14.056, "eval_steps_per_second": 1.78, "step": 600 } ], "logging_steps": 5, "max_steps": 2766, "num_train_epochs": 3, "save_steps": 100, "total_flos": 1.977962496786432e+17, "trial_name": null, "trial_params": null }