{ "best_metric": 0.4632822871208191, "best_model_checkpoint": "./output_v2/7b_cluster019_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_019/checkpoint-600", "epoch": 2.038216560509554, "global_step": 1200, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.02, "learning_rate": 0.0002, "loss": 0.6267, "step": 10 }, { "epoch": 0.03, "learning_rate": 0.0002, "loss": 0.7811, "step": 20 }, { "epoch": 0.05, "learning_rate": 0.0002, "loss": 0.5062, "step": 30 }, { "epoch": 0.07, "learning_rate": 0.0002, "loss": 0.6137, "step": 40 }, { "epoch": 0.08, "learning_rate": 0.0002, "loss": 0.4957, "step": 50 }, { "epoch": 0.1, "learning_rate": 0.0002, "loss": 0.4838, "step": 60 }, { "epoch": 0.12, "learning_rate": 0.0002, "loss": 0.6938, "step": 70 }, { "epoch": 0.14, "learning_rate": 0.0002, "loss": 0.4848, "step": 80 }, { "epoch": 0.15, "learning_rate": 0.0002, "loss": 0.4587, "step": 90 }, { "epoch": 0.17, "learning_rate": 0.0002, "loss": 0.5768, "step": 100 }, { "epoch": 0.19, "learning_rate": 0.0002, "loss": 0.4725, "step": 110 }, { "epoch": 0.2, "learning_rate": 0.0002, "loss": 0.5152, "step": 120 }, { "epoch": 0.22, "learning_rate": 0.0002, "loss": 0.5707, "step": 130 }, { "epoch": 0.24, "learning_rate": 0.0002, "loss": 0.5002, "step": 140 }, { "epoch": 0.25, "learning_rate": 0.0002, "loss": 0.4043, "step": 150 }, { "epoch": 0.27, "learning_rate": 0.0002, "loss": 0.6542, "step": 160 }, { "epoch": 0.29, "learning_rate": 0.0002, "loss": 0.4533, "step": 170 }, { "epoch": 0.31, "learning_rate": 0.0002, "loss": 0.5814, "step": 180 }, { "epoch": 0.32, "learning_rate": 0.0002, "loss": 0.525, "step": 190 }, { "epoch": 0.34, "learning_rate": 0.0002, "loss": 0.5448, "step": 200 }, { "epoch": 0.34, "eval_loss": 0.48780253529548645, "eval_runtime": 101.8557, "eval_samples_per_second": 9.818, "eval_steps_per_second": 4.909, "step": 200 }, { "epoch": 0.34, "mmlu_eval_accuracy": 0.4580124869645426, "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727, "mmlu_eval_accuracy_anatomy": 0.6428571428571429, "mmlu_eval_accuracy_astronomy": 0.5, "mmlu_eval_accuracy_business_ethics": 0.6363636363636364, "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586, "mmlu_eval_accuracy_college_biology": 0.5, "mmlu_eval_accuracy_college_chemistry": 0.125, "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365, "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182, "mmlu_eval_accuracy_college_medicine": 0.2727272727272727, "mmlu_eval_accuracy_college_physics": 0.5454545454545454, "mmlu_eval_accuracy_computer_security": 0.2727272727272727, "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464, "mmlu_eval_accuracy_econometrics": 0.16666666666666666, "mmlu_eval_accuracy_electrical_engineering": 0.4375, "mmlu_eval_accuracy_elementary_mathematics": 0.4146341463414634, "mmlu_eval_accuracy_formal_logic": 0.2857142857142857, "mmlu_eval_accuracy_global_facts": 0.4, "mmlu_eval_accuracy_high_school_biology": 0.375, "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091, "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556, "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556, "mmlu_eval_accuracy_high_school_geography": 0.6363636363636364, "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714, "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256, "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724, 
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231, "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354, "mmlu_eval_accuracy_high_school_psychology": 0.7166666666666667, "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654, "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818, "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156, "mmlu_eval_accuracy_human_aging": 0.7391304347826086, "mmlu_eval_accuracy_human_sexuality": 0.5, "mmlu_eval_accuracy_international_law": 0.7692307692307693, "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727, "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556, "mmlu_eval_accuracy_machine_learning": 0.2727272727272727, "mmlu_eval_accuracy_management": 0.36363636363636365, "mmlu_eval_accuracy_marketing": 0.68, "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273, "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186, "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576, "mmlu_eval_accuracy_moral_scenarios": 0.23, "mmlu_eval_accuracy_nutrition": 0.5151515151515151, "mmlu_eval_accuracy_philosophy": 0.5294117647058824, "mmlu_eval_accuracy_prehistory": 0.5428571428571428, "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225, "mmlu_eval_accuracy_professional_law": 0.3176470588235294, "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744, "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375, "mmlu_eval_accuracy_public_relations": 0.6666666666666666, "mmlu_eval_accuracy_security_studies": 0.4444444444444444, "mmlu_eval_accuracy_sociology": 0.5, "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364, "mmlu_eval_accuracy_virology": 0.5, "mmlu_eval_accuracy_world_religions": 0.6842105263157895, "mmlu_loss": 0.9094281114112615, "step": 200 }, { "epoch": 0.36, "learning_rate": 0.0002, "loss": 0.4536, "step": 210 }, { "epoch": 0.37, "learning_rate": 0.0002, "loss": 0.5147, "step": 220 }, { "epoch": 0.39, "learning_rate": 0.0002, "loss": 0.423, "step": 230 }, { "epoch": 0.41, "learning_rate": 0.0002, "loss": 0.5832, "step": 240 }, { "epoch": 0.42, "learning_rate": 0.0002, "loss": 0.4719, "step": 250 }, { "epoch": 0.44, "learning_rate": 0.0002, "loss": 0.452, "step": 260 }, { "epoch": 0.46, "learning_rate": 0.0002, "loss": 0.4907, "step": 270 }, { "epoch": 0.48, "learning_rate": 0.0002, "loss": 0.5322, "step": 280 }, { "epoch": 0.49, "learning_rate": 0.0002, "loss": 0.592, "step": 290 }, { "epoch": 0.51, "learning_rate": 0.0002, "loss": 0.5964, "step": 300 }, { "epoch": 0.53, "learning_rate": 0.0002, "loss": 0.5404, "step": 310 }, { "epoch": 0.54, "learning_rate": 0.0002, "loss": 0.5788, "step": 320 }, { "epoch": 0.56, "learning_rate": 0.0002, "loss": 0.4701, "step": 330 }, { "epoch": 0.58, "learning_rate": 0.0002, "loss": 0.4899, "step": 340 }, { "epoch": 0.59, "learning_rate": 0.0002, "loss": 0.5177, "step": 350 }, { "epoch": 0.61, "learning_rate": 0.0002, "loss": 0.479, "step": 360 }, { "epoch": 0.63, "learning_rate": 0.0002, "loss": 0.4815, "step": 370 }, { "epoch": 0.65, "learning_rate": 0.0002, "loss": 0.4935, "step": 380 }, { "epoch": 0.66, "learning_rate": 0.0002, "loss": 0.5712, "step": 390 }, { "epoch": 0.68, "learning_rate": 0.0002, "loss": 0.4873, "step": 400 }, { "epoch": 0.68, "eval_loss": 0.4672442674636841, "eval_runtime": 102.1346, "eval_samples_per_second": 9.791, "eval_steps_per_second": 4.896, "step": 400 }, { "epoch": 0.68, "mmlu_eval_accuracy": 0.4486531670462866, "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727, 
"mmlu_eval_accuracy_anatomy": 0.5714285714285714, "mmlu_eval_accuracy_astronomy": 0.5, "mmlu_eval_accuracy_business_ethics": 0.45454545454545453, "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655, "mmlu_eval_accuracy_college_biology": 0.4375, "mmlu_eval_accuracy_college_chemistry": 0.125, "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365, "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727, "mmlu_eval_accuracy_college_medicine": 0.2727272727272727, "mmlu_eval_accuracy_college_physics": 0.45454545454545453, "mmlu_eval_accuracy_computer_security": 0.2727272727272727, "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464, "mmlu_eval_accuracy_econometrics": 0.25, "mmlu_eval_accuracy_electrical_engineering": 0.3125, "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244, "mmlu_eval_accuracy_formal_logic": 0.21428571428571427, "mmlu_eval_accuracy_global_facts": 0.5, "mmlu_eval_accuracy_high_school_biology": 0.40625, "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727, "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556, "mmlu_eval_accuracy_high_school_european_history": 0.5, "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727, "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191, "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256, "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276, "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464, "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354, "mmlu_eval_accuracy_high_school_psychology": 0.75, "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913, "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364, "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156, "mmlu_eval_accuracy_human_aging": 0.6956521739130435, "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333, "mmlu_eval_accuracy_international_law": 0.7692307692307693, "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727, "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556, "mmlu_eval_accuracy_machine_learning": 0.2727272727272727, "mmlu_eval_accuracy_management": 0.45454545454545453, "mmlu_eval_accuracy_marketing": 0.68, "mmlu_eval_accuracy_medical_genetics": 0.6363636363636364, "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628, "mmlu_eval_accuracy_moral_disputes": 0.5, "mmlu_eval_accuracy_moral_scenarios": 0.25, "mmlu_eval_accuracy_nutrition": 0.5757575757575758, "mmlu_eval_accuracy_philosophy": 0.4411764705882353, "mmlu_eval_accuracy_prehistory": 0.5714285714285714, "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225, "mmlu_eval_accuracy_professional_law": 0.32941176470588235, "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644, "mmlu_eval_accuracy_professional_psychology": 0.4057971014492754, "mmlu_eval_accuracy_public_relations": 0.5, "mmlu_eval_accuracy_security_studies": 0.4444444444444444, "mmlu_eval_accuracy_sociology": 0.5909090909090909, "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273, "mmlu_eval_accuracy_virology": 0.4444444444444444, "mmlu_eval_accuracy_world_religions": 0.7368421052631579, "mmlu_loss": 0.9122924784456159, "step": 400 }, { "epoch": 0.7, "learning_rate": 0.0002, "loss": 0.5392, "step": 410 }, { "epoch": 0.71, "learning_rate": 0.0002, "loss": 0.4237, "step": 420 }, { "epoch": 0.73, "learning_rate": 0.0002, "loss": 0.4864, "step": 430 }, { "epoch": 0.75, "learning_rate": 0.0002, "loss": 0.4317, "step": 440 }, { 
"epoch": 0.76, "learning_rate": 0.0002, "loss": 0.4613, "step": 450 }, { "epoch": 0.78, "learning_rate": 0.0002, "loss": 0.4595, "step": 460 }, { "epoch": 0.8, "learning_rate": 0.0002, "loss": 0.623, "step": 470 }, { "epoch": 0.82, "learning_rate": 0.0002, "loss": 0.5262, "step": 480 }, { "epoch": 0.83, "learning_rate": 0.0002, "loss": 0.4351, "step": 490 }, { "epoch": 0.85, "learning_rate": 0.0002, "loss": 0.5168, "step": 500 }, { "epoch": 0.87, "learning_rate": 0.0002, "loss": 0.4274, "step": 510 }, { "epoch": 0.88, "learning_rate": 0.0002, "loss": 0.5015, "step": 520 }, { "epoch": 0.9, "learning_rate": 0.0002, "loss": 0.4768, "step": 530 }, { "epoch": 0.92, "learning_rate": 0.0002, "loss": 0.4208, "step": 540 }, { "epoch": 0.93, "learning_rate": 0.0002, "loss": 0.4848, "step": 550 }, { "epoch": 0.95, "learning_rate": 0.0002, "loss": 0.4043, "step": 560 }, { "epoch": 0.97, "learning_rate": 0.0002, "loss": 0.4383, "step": 570 }, { "epoch": 0.99, "learning_rate": 0.0002, "loss": 0.5794, "step": 580 }, { "epoch": 1.0, "learning_rate": 0.0002, "loss": 0.439, "step": 590 }, { "epoch": 1.02, "learning_rate": 0.0002, "loss": 0.3456, "step": 600 }, { "epoch": 1.02, "eval_loss": 0.4632822871208191, "eval_runtime": 101.8929, "eval_samples_per_second": 9.814, "eval_steps_per_second": 4.907, "step": 600 }, { "epoch": 1.02, "mmlu_eval_accuracy": 0.4617338416384464, "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727, "mmlu_eval_accuracy_anatomy": 0.6428571428571429, "mmlu_eval_accuracy_astronomy": 0.4375, "mmlu_eval_accuracy_business_ethics": 0.6363636363636364, "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586, "mmlu_eval_accuracy_college_biology": 0.5, "mmlu_eval_accuracy_college_chemistry": 0.0, "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365, "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182, "mmlu_eval_accuracy_college_medicine": 0.36363636363636365, "mmlu_eval_accuracy_college_physics": 0.45454545454545453, "mmlu_eval_accuracy_computer_security": 0.36363636363636365, "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231, "mmlu_eval_accuracy_econometrics": 0.25, "mmlu_eval_accuracy_electrical_engineering": 0.3125, "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244, "mmlu_eval_accuracy_formal_logic": 0.2857142857142857, "mmlu_eval_accuracy_global_facts": 0.6, "mmlu_eval_accuracy_high_school_biology": 0.40625, "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453, "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556, "mmlu_eval_accuracy_high_school_european_history": 0.5, "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273, "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666, "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256, "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483, "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231, "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354, "mmlu_eval_accuracy_high_school_psychology": 0.7166666666666667, "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913, "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364, "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384, "mmlu_eval_accuracy_human_aging": 0.6521739130434783, "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667, "mmlu_eval_accuracy_international_law": 0.7692307692307693, "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365, "mmlu_eval_accuracy_logical_fallacies": 
0.5555555555555556, "mmlu_eval_accuracy_machine_learning": 0.36363636363636365, "mmlu_eval_accuracy_management": 0.6363636363636364, "mmlu_eval_accuracy_marketing": 0.68, "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273, "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628, "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576, "mmlu_eval_accuracy_moral_scenarios": 0.26, "mmlu_eval_accuracy_nutrition": 0.5454545454545454, "mmlu_eval_accuracy_philosophy": 0.5294117647058824, "mmlu_eval_accuracy_prehistory": 0.4857142857142857, "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613, "mmlu_eval_accuracy_professional_law": 0.35294117647058826, "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744, "mmlu_eval_accuracy_professional_psychology": 0.4057971014492754, "mmlu_eval_accuracy_public_relations": 0.5, "mmlu_eval_accuracy_security_studies": 0.4074074074074074, "mmlu_eval_accuracy_sociology": 0.5454545454545454, "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454, "mmlu_eval_accuracy_virology": 0.3888888888888889, "mmlu_eval_accuracy_world_religions": 0.7368421052631579, "mmlu_loss": 0.8381480119110866, "step": 600 }, { "epoch": 1.04, "learning_rate": 0.0002, "loss": 0.3464, "step": 610 }, { "epoch": 1.05, "learning_rate": 0.0002, "loss": 0.4158, "step": 620 }, { "epoch": 1.07, "learning_rate": 0.0002, "loss": 0.3465, "step": 630 }, { "epoch": 1.09, "learning_rate": 0.0002, "loss": 0.3078, "step": 640 }, { "epoch": 1.1, "learning_rate": 0.0002, "loss": 0.4329, "step": 650 }, { "epoch": 1.12, "learning_rate": 0.0002, "loss": 0.3874, "step": 660 }, { "epoch": 1.14, "learning_rate": 0.0002, "loss": 0.4908, "step": 670 }, { "epoch": 1.15, "learning_rate": 0.0002, "loss": 0.5097, "step": 680 }, { "epoch": 1.17, "learning_rate": 0.0002, "loss": 0.3967, "step": 690 }, { "epoch": 1.19, "learning_rate": 0.0002, "loss": 0.4721, "step": 700 }, { "epoch": 1.21, "learning_rate": 0.0002, "loss": 0.3612, "step": 710 }, { "epoch": 1.22, "learning_rate": 0.0002, "loss": 0.4453, "step": 720 }, { "epoch": 1.24, "learning_rate": 0.0002, "loss": 0.4538, "step": 730 }, { "epoch": 1.26, "learning_rate": 0.0002, "loss": 0.3903, "step": 740 }, { "epoch": 1.27, "learning_rate": 0.0002, "loss": 0.3541, "step": 750 }, { "epoch": 1.29, "learning_rate": 0.0002, "loss": 0.3564, "step": 760 }, { "epoch": 1.31, "learning_rate": 0.0002, "loss": 0.386, "step": 770 }, { "epoch": 1.32, "learning_rate": 0.0002, "loss": 0.4495, "step": 780 }, { "epoch": 1.34, "learning_rate": 0.0002, "loss": 0.3281, "step": 790 }, { "epoch": 1.36, "learning_rate": 0.0002, "loss": 0.3315, "step": 800 }, { "epoch": 1.36, "eval_loss": 0.47132888436317444, "eval_runtime": 102.2178, "eval_samples_per_second": 9.783, "eval_steps_per_second": 4.892, "step": 800 }, { "epoch": 1.36, "mmlu_eval_accuracy": 0.4676211978570877, "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365, "mmlu_eval_accuracy_anatomy": 0.5714285714285714, "mmlu_eval_accuracy_astronomy": 0.4375, "mmlu_eval_accuracy_business_ethics": 0.6363636363636364, "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552, "mmlu_eval_accuracy_college_biology": 0.5, "mmlu_eval_accuracy_college_chemistry": 0.125, "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365, "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727, "mmlu_eval_accuracy_college_medicine": 0.3181818181818182, "mmlu_eval_accuracy_college_physics": 0.45454545454545453, "mmlu_eval_accuracy_computer_security": 0.45454545454545453, 
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464, "mmlu_eval_accuracy_econometrics": 0.25, "mmlu_eval_accuracy_electrical_engineering": 0.3125, "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073, "mmlu_eval_accuracy_formal_logic": 0.21428571428571427, "mmlu_eval_accuracy_global_facts": 0.6, "mmlu_eval_accuracy_high_school_biology": 0.4375, "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365, "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444, "mmlu_eval_accuracy_high_school_european_history": 0.5, "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727, "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191, "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723, "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483, "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231, "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826, "mmlu_eval_accuracy_high_school_psychology": 0.75, "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173, "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818, "mmlu_eval_accuracy_high_school_world_history": 0.5, "mmlu_eval_accuracy_human_aging": 0.7391304347826086, "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333, "mmlu_eval_accuracy_international_law": 0.7692307692307693, "mmlu_eval_accuracy_jurisprudence": 0.18181818181818182, "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556, "mmlu_eval_accuracy_machine_learning": 0.36363636363636365, "mmlu_eval_accuracy_management": 0.5454545454545454, "mmlu_eval_accuracy_marketing": 0.68, "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273, "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628, "mmlu_eval_accuracy_moral_disputes": 0.5, "mmlu_eval_accuracy_moral_scenarios": 0.29, "mmlu_eval_accuracy_nutrition": 0.5757575757575758, "mmlu_eval_accuracy_philosophy": 0.5, "mmlu_eval_accuracy_prehistory": 0.5142857142857142, "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613, "mmlu_eval_accuracy_professional_law": 0.34705882352941175, "mmlu_eval_accuracy_professional_medicine": 0.3870967741935484, "mmlu_eval_accuracy_professional_psychology": 0.4492753623188406, "mmlu_eval_accuracy_public_relations": 0.6666666666666666, "mmlu_eval_accuracy_security_studies": 0.37037037037037035, "mmlu_eval_accuracy_sociology": 0.5909090909090909, "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273, "mmlu_eval_accuracy_virology": 0.3888888888888889, "mmlu_eval_accuracy_world_religions": 0.7368421052631579, "mmlu_loss": 0.8817933564495013, "step": 800 }, { "epoch": 1.38, "learning_rate": 0.0002, "loss": 0.4534, "step": 810 }, { "epoch": 1.39, "learning_rate": 0.0002, "loss": 0.3944, "step": 820 }, { "epoch": 1.41, "learning_rate": 0.0002, "loss": 0.4096, "step": 830 }, { "epoch": 1.43, "learning_rate": 0.0002, "loss": 0.4039, "step": 840 }, { "epoch": 1.44, "learning_rate": 0.0002, "loss": 0.438, "step": 850 }, { "epoch": 1.46, "learning_rate": 0.0002, "loss": 0.3773, "step": 860 }, { "epoch": 1.48, "learning_rate": 0.0002, "loss": 0.4969, "step": 870 }, { "epoch": 1.49, "learning_rate": 0.0002, "loss": 0.396, "step": 880 }, { "epoch": 1.51, "learning_rate": 0.0002, "loss": 0.4196, "step": 890 }, { "epoch": 1.53, "learning_rate": 0.0002, "loss": 0.5202, "step": 900 }, { "epoch": 1.55, "learning_rate": 0.0002, "loss": 0.4728, "step": 910 }, { "epoch": 1.56, "learning_rate": 0.0002, "loss": 0.4229, "step": 920 }, { "epoch": 1.58, "learning_rate": 0.0002, 
"loss": 0.4879, "step": 930 }, { "epoch": 1.6, "learning_rate": 0.0002, "loss": 0.4288, "step": 940 }, { "epoch": 1.61, "learning_rate": 0.0002, "loss": 0.4085, "step": 950 }, { "epoch": 1.63, "learning_rate": 0.0002, "loss": 0.3793, "step": 960 }, { "epoch": 1.65, "learning_rate": 0.0002, "loss": 0.503, "step": 970 }, { "epoch": 1.66, "learning_rate": 0.0002, "loss": 0.3353, "step": 980 }, { "epoch": 1.68, "learning_rate": 0.0002, "loss": 0.4212, "step": 990 }, { "epoch": 1.7, "learning_rate": 0.0002, "loss": 0.341, "step": 1000 }, { "epoch": 1.7, "eval_loss": 0.4644744396209717, "eval_runtime": 102.0988, "eval_samples_per_second": 9.794, "eval_steps_per_second": 4.897, "step": 1000 }, { "epoch": 1.7, "mmlu_eval_accuracy": 0.4532186308687434, "mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182, "mmlu_eval_accuracy_anatomy": 0.5714285714285714, "mmlu_eval_accuracy_astronomy": 0.5, "mmlu_eval_accuracy_business_ethics": 0.6363636363636364, "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586, "mmlu_eval_accuracy_college_biology": 0.4375, "mmlu_eval_accuracy_college_chemistry": 0.25, "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365, "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182, "mmlu_eval_accuracy_college_medicine": 0.3181818181818182, "mmlu_eval_accuracy_college_physics": 0.45454545454545453, "mmlu_eval_accuracy_computer_security": 0.2727272727272727, "mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156, "mmlu_eval_accuracy_econometrics": 0.16666666666666666, "mmlu_eval_accuracy_electrical_engineering": 0.375, "mmlu_eval_accuracy_elementary_mathematics": 0.4146341463414634, "mmlu_eval_accuracy_formal_logic": 0.21428571428571427, "mmlu_eval_accuracy_global_facts": 0.6, "mmlu_eval_accuracy_high_school_biology": 0.375, "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091, "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444, "mmlu_eval_accuracy_high_school_european_history": 0.5, "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273, "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191, "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256, "mmlu_eval_accuracy_high_school_mathematics": 0.10344827586206896, "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231, "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354, "mmlu_eval_accuracy_high_school_psychology": 0.7333333333333333, "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173, "mmlu_eval_accuracy_high_school_us_history": 0.5454545454545454, "mmlu_eval_accuracy_high_school_world_history": 0.5, "mmlu_eval_accuracy_human_aging": 0.7391304347826086, "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333, "mmlu_eval_accuracy_international_law": 0.7692307692307693, "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727, "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112, "mmlu_eval_accuracy_machine_learning": 0.36363636363636365, "mmlu_eval_accuracy_management": 0.45454545454545453, "mmlu_eval_accuracy_marketing": 0.72, "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273, "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628, "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842, "mmlu_eval_accuracy_moral_scenarios": 0.24, "mmlu_eval_accuracy_nutrition": 0.48484848484848486, "mmlu_eval_accuracy_philosophy": 0.47058823529411764, "mmlu_eval_accuracy_prehistory": 0.4857142857142857, "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613, 
"mmlu_eval_accuracy_professional_law": 0.3352941176470588, "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744, "mmlu_eval_accuracy_professional_psychology": 0.4492753623188406, "mmlu_eval_accuracy_public_relations": 0.5833333333333334, "mmlu_eval_accuracy_security_studies": 0.48148148148148145, "mmlu_eval_accuracy_sociology": 0.5454545454545454, "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454, "mmlu_eval_accuracy_virology": 0.3888888888888889, "mmlu_eval_accuracy_world_religions": 0.7368421052631579, "mmlu_loss": 0.9635014412705799, "step": 1000 }, { "epoch": 1.72, "learning_rate": 0.0002, "loss": 0.4641, "step": 1010 }, { "epoch": 1.73, "learning_rate": 0.0002, "loss": 0.4247, "step": 1020 }, { "epoch": 1.75, "learning_rate": 0.0002, "loss": 0.3628, "step": 1030 }, { "epoch": 1.77, "learning_rate": 0.0002, "loss": 0.3531, "step": 1040 }, { "epoch": 1.78, "learning_rate": 0.0002, "loss": 0.407, "step": 1050 }, { "epoch": 1.8, "learning_rate": 0.0002, "loss": 0.3457, "step": 1060 }, { "epoch": 1.82, "learning_rate": 0.0002, "loss": 0.3636, "step": 1070 }, { "epoch": 1.83, "learning_rate": 0.0002, "loss": 0.4112, "step": 1080 }, { "epoch": 1.85, "learning_rate": 0.0002, "loss": 0.4043, "step": 1090 }, { "epoch": 1.87, "learning_rate": 0.0002, "loss": 0.4891, "step": 1100 }, { "epoch": 1.89, "learning_rate": 0.0002, "loss": 0.4216, "step": 1110 }, { "epoch": 1.9, "learning_rate": 0.0002, "loss": 0.2883, "step": 1120 }, { "epoch": 1.92, "learning_rate": 0.0002, "loss": 0.4063, "step": 1130 }, { "epoch": 1.94, "learning_rate": 0.0002, "loss": 0.3683, "step": 1140 }, { "epoch": 1.95, "learning_rate": 0.0002, "loss": 0.3717, "step": 1150 }, { "epoch": 1.97, "learning_rate": 0.0002, "loss": 0.4374, "step": 1160 }, { "epoch": 1.99, "learning_rate": 0.0002, "loss": 0.4172, "step": 1170 }, { "epoch": 2.0, "learning_rate": 0.0002, "loss": 0.2856, "step": 1180 }, { "epoch": 2.02, "learning_rate": 0.0002, "loss": 0.265, "step": 1190 }, { "epoch": 2.04, "learning_rate": 0.0002, "loss": 0.2448, "step": 1200 }, { "epoch": 2.04, "eval_loss": 0.5078285336494446, "eval_runtime": 102.2139, "eval_samples_per_second": 9.783, "eval_steps_per_second": 4.892, "step": 1200 }, { "epoch": 2.04, "mmlu_eval_accuracy": 0.45134380789496525, "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727, "mmlu_eval_accuracy_anatomy": 0.6428571428571429, "mmlu_eval_accuracy_astronomy": 0.5, "mmlu_eval_accuracy_business_ethics": 0.7272727272727273, "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552, "mmlu_eval_accuracy_college_biology": 0.375, "mmlu_eval_accuracy_college_chemistry": 0.25, "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365, "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365, "mmlu_eval_accuracy_college_medicine": 0.45454545454545453, "mmlu_eval_accuracy_college_physics": 0.45454545454545453, "mmlu_eval_accuracy_computer_security": 0.2727272727272727, "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231, "mmlu_eval_accuracy_econometrics": 0.25, "mmlu_eval_accuracy_electrical_engineering": 0.125, "mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244, "mmlu_eval_accuracy_formal_logic": 0.35714285714285715, "mmlu_eval_accuracy_global_facts": 0.4, "mmlu_eval_accuracy_high_school_biology": 0.375, "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091, "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666, "mmlu_eval_accuracy_high_school_european_history": 0.5, "mmlu_eval_accuracy_high_school_geography": 
0.7272727272727273, "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238, "mmlu_eval_accuracy_high_school_macroeconomics": 0.37209302325581395, "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483, "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077, "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826, "mmlu_eval_accuracy_high_school_psychology": 0.7333333333333333, "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913, "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818, "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156, "mmlu_eval_accuracy_human_aging": 0.6956521739130435, "mmlu_eval_accuracy_human_sexuality": 0.25, "mmlu_eval_accuracy_international_law": 0.8461538461538461, "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727, "mmlu_eval_accuracy_logical_fallacies": 0.5, "mmlu_eval_accuracy_machine_learning": 0.2727272727272727, "mmlu_eval_accuracy_management": 0.45454545454545453, "mmlu_eval_accuracy_marketing": 0.68, "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273, "mmlu_eval_accuracy_miscellaneous": 0.6162790697674418, "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842, "mmlu_eval_accuracy_moral_scenarios": 0.25, "mmlu_eval_accuracy_nutrition": 0.5151515151515151, "mmlu_eval_accuracy_philosophy": 0.5, "mmlu_eval_accuracy_prehistory": 0.4, "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225, "mmlu_eval_accuracy_professional_law": 0.34705882352941175, "mmlu_eval_accuracy_professional_medicine": 0.3548387096774194, "mmlu_eval_accuracy_professional_psychology": 0.463768115942029, "mmlu_eval_accuracy_public_relations": 0.5833333333333334, "mmlu_eval_accuracy_security_studies": 0.4444444444444444, "mmlu_eval_accuracy_sociology": 0.5, "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454, "mmlu_eval_accuracy_virology": 0.3888888888888889, "mmlu_eval_accuracy_world_religions": 0.6842105263157895, "mmlu_loss": 0.9378224162934421, "step": 1200 } ], "max_steps": 5000, "num_train_epochs": 9, "total_flos": 1.1171389466360218e+17, "trial_name": null, "trial_params": null }
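
The object above is a standard Hugging Face Trainer `trainer_state.json`: `log_history` interleaves train-loss records every 10 steps with `eval_loss` and per-subject MMLU accuracy records every 200 steps. A minimal sketch for summarizing it, assuming the JSON is saved locally as `trainer_state.json` (a hypothetical path; none of the code below comes from the log itself):

```python
# Sketch: load a trainer_state.json like the one above and summarize its curves.
# STATE_PATH is an assumed local path; in practice the file lives inside the
# checkpoint directory named in "best_model_checkpoint".
import json

STATE_PATH = "trainer_state.json"  # adjust to wherever this JSON is saved

with open(STATE_PATH) as f:
    state = json.load(f)

# Split log_history by record type: train steps log "loss", eval steps log
# "eval_loss", and the MMLU harness logs "mmlu_eval_accuracy".
history = state["log_history"]
train = [(r["step"], r["loss"]) for r in history if "loss" in r]
evals = [(r["step"], r["eval_loss"]) for r in history if "eval_loss" in r]
mmlu = [(r["step"], r["mmlu_eval_accuracy"]) for r in history if "mmlu_eval_accuracy" in r]

print(f"best eval_loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
for step, loss in evals:
    print(f"step {step:5d}  eval_loss {loss:.4f}")
for step, acc in mmlu:
    print(f"step {step:5d}  mmlu_eval_accuracy {acc:.4f}")

# Reading this particular log: eval_loss bottoms out at step 600 (0.4633) and
# rises to 0.5078 by step 1200 while train loss keeps falling (~0.50 early,
# ~0.25 by step 1200) -- the usual overfitting signature, which is why the
# best checkpoint is the step-600 one.
```

Run against this file, the printed eval trend makes the checkpoint selection visible at a glance; the constant `learning_rate` of 0.0002 in every record also confirms no LR schedule was active over these 1200 steps.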