{ "best_metric": 0.6129981875419617, "best_model_checkpoint": "./output_v2/7b_cluster028_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_028/checkpoint-800", "epoch": 0.8125952260030472, "global_step": 800, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.01, "learning_rate": 0.0002, "loss": 0.8709, "step": 10 }, { "epoch": 0.02, "learning_rate": 0.0002, "loss": 0.9602, "step": 20 }, { "epoch": 0.03, "learning_rate": 0.0002, "loss": 0.7606, "step": 30 }, { "epoch": 0.04, "learning_rate": 0.0002, "loss": 0.7942, "step": 40 }, { "epoch": 0.05, "learning_rate": 0.0002, "loss": 0.5849, "step": 50 }, { "epoch": 0.06, "learning_rate": 0.0002, "loss": 0.7161, "step": 60 }, { "epoch": 0.07, "learning_rate": 0.0002, "loss": 0.7699, "step": 70 }, { "epoch": 0.08, "learning_rate": 0.0002, "loss": 0.7264, "step": 80 }, { "epoch": 0.09, "learning_rate": 0.0002, "loss": 0.6845, "step": 90 }, { "epoch": 0.1, "learning_rate": 0.0002, "loss": 0.6638, "step": 100 }, { "epoch": 0.11, "learning_rate": 0.0002, "loss": 0.6089, "step": 110 }, { "epoch": 0.12, "learning_rate": 0.0002, "loss": 0.7681, "step": 120 }, { "epoch": 0.13, "learning_rate": 0.0002, "loss": 0.7489, "step": 130 }, { "epoch": 0.14, "learning_rate": 0.0002, "loss": 0.7472, "step": 140 }, { "epoch": 0.15, "learning_rate": 0.0002, "loss": 0.8521, "step": 150 }, { "epoch": 0.16, "learning_rate": 0.0002, "loss": 0.7223, "step": 160 }, { "epoch": 0.17, "learning_rate": 0.0002, "loss": 0.6727, "step": 170 }, { "epoch": 0.18, "learning_rate": 0.0002, "loss": 0.6434, "step": 180 }, { "epoch": 0.19, "learning_rate": 0.0002, "loss": 0.6754, "step": 190 }, { "epoch": 0.2, "learning_rate": 0.0002, "loss": 0.6945, "step": 200 }, { "epoch": 0.2, "eval_loss": 0.6316617727279663, "eval_runtime": 120.7896, "eval_samples_per_second": 8.279, "eval_steps_per_second": 4.139, "step": 200 }, { "epoch": 0.2, "mmlu_eval_accuracy": 0.46933615423997516, "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727, "mmlu_eval_accuracy_anatomy": 0.5714285714285714, "mmlu_eval_accuracy_astronomy": 0.375, "mmlu_eval_accuracy_business_ethics": 0.5454545454545454, "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552, "mmlu_eval_accuracy_college_biology": 0.4375, "mmlu_eval_accuracy_college_chemistry": 0.25, "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365, "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365, "mmlu_eval_accuracy_college_medicine": 0.4090909090909091, "mmlu_eval_accuracy_college_physics": 0.5454545454545454, "mmlu_eval_accuracy_computer_security": 0.36363636363636365, "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464, "mmlu_eval_accuracy_econometrics": 0.16666666666666666, "mmlu_eval_accuracy_electrical_engineering": 0.375, "mmlu_eval_accuracy_elementary_mathematics": 0.24390243902439024, "mmlu_eval_accuracy_formal_logic": 0.35714285714285715, "mmlu_eval_accuracy_global_facts": 0.5, "mmlu_eval_accuracy_high_school_biology": 0.34375, "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365, "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444, "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112, "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818, "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666, "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907, "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724, 
"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231, "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354, "mmlu_eval_accuracy_high_school_psychology": 0.7, "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173, "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818, "mmlu_eval_accuracy_high_school_world_history": 0.5, "mmlu_eval_accuracy_human_aging": 0.6956521739130435, "mmlu_eval_accuracy_human_sexuality": 0.5, "mmlu_eval_accuracy_international_law": 0.7692307692307693, "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453, "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556, "mmlu_eval_accuracy_machine_learning": 0.2727272727272727, "mmlu_eval_accuracy_management": 0.5454545454545454, "mmlu_eval_accuracy_marketing": 0.72, "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273, "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745, "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576, "mmlu_eval_accuracy_moral_scenarios": 0.23, "mmlu_eval_accuracy_nutrition": 0.6060606060606061, "mmlu_eval_accuracy_philosophy": 0.47058823529411764, "mmlu_eval_accuracy_prehistory": 0.45714285714285713, "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613, "mmlu_eval_accuracy_professional_law": 0.3411764705882353, "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644, "mmlu_eval_accuracy_professional_psychology": 0.3188405797101449, "mmlu_eval_accuracy_public_relations": 0.5, "mmlu_eval_accuracy_security_studies": 0.48148148148148145, "mmlu_eval_accuracy_sociology": 0.6818181818181818, "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273, "mmlu_eval_accuracy_virology": 0.3333333333333333, "mmlu_eval_accuracy_world_religions": 0.7368421052631579, "mmlu_loss": 0.9820772503398106, "step": 200 }, { "epoch": 0.21, "learning_rate": 0.0002, "loss": 0.6532, "step": 210 }, { "epoch": 0.22, "learning_rate": 0.0002, "loss": 0.7207, "step": 220 }, { "epoch": 0.23, "learning_rate": 0.0002, "loss": 0.7092, "step": 230 }, { "epoch": 0.24, "learning_rate": 0.0002, "loss": 0.6561, "step": 240 }, { "epoch": 0.25, "learning_rate": 0.0002, "loss": 0.6516, "step": 250 }, { "epoch": 0.26, "learning_rate": 0.0002, "loss": 0.6293, "step": 260 }, { "epoch": 0.27, "learning_rate": 0.0002, "loss": 0.6238, "step": 270 }, { "epoch": 0.28, "learning_rate": 0.0002, "loss": 0.6484, "step": 280 }, { "epoch": 0.29, "learning_rate": 0.0002, "loss": 0.6795, "step": 290 }, { "epoch": 0.3, "learning_rate": 0.0002, "loss": 0.5931, "step": 300 }, { "epoch": 0.31, "learning_rate": 0.0002, "loss": 0.7188, "step": 310 }, { "epoch": 0.33, "learning_rate": 0.0002, "loss": 0.6823, "step": 320 }, { "epoch": 0.34, "learning_rate": 0.0002, "loss": 0.7286, "step": 330 }, { "epoch": 0.35, "learning_rate": 0.0002, "loss": 0.7396, "step": 340 }, { "epoch": 0.36, "learning_rate": 0.0002, "loss": 0.6779, "step": 350 }, { "epoch": 0.37, "learning_rate": 0.0002, "loss": 0.7003, "step": 360 }, { "epoch": 0.38, "learning_rate": 0.0002, "loss": 0.6721, "step": 370 }, { "epoch": 0.39, "learning_rate": 0.0002, "loss": 0.736, "step": 380 }, { "epoch": 0.4, "learning_rate": 0.0002, "loss": 0.6221, "step": 390 }, { "epoch": 0.41, "learning_rate": 0.0002, "loss": 0.6736, "step": 400 }, { "epoch": 0.41, "eval_loss": 0.6207628846168518, "eval_runtime": 120.8451, "eval_samples_per_second": 8.275, "eval_steps_per_second": 4.138, "step": 400 }, { "epoch": 0.41, "mmlu_eval_accuracy": 0.4837331454649875, "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365, 
"mmlu_eval_accuracy_anatomy": 0.7142857142857143, "mmlu_eval_accuracy_astronomy": 0.3125, "mmlu_eval_accuracy_business_ethics": 0.5454545454545454, "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552, "mmlu_eval_accuracy_college_biology": 0.5625, "mmlu_eval_accuracy_college_chemistry": 0.125, "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365, "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365, "mmlu_eval_accuracy_college_medicine": 0.45454545454545453, "mmlu_eval_accuracy_college_physics": 0.5454545454545454, "mmlu_eval_accuracy_computer_security": 0.5454545454545454, "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464, "mmlu_eval_accuracy_econometrics": 0.08333333333333333, "mmlu_eval_accuracy_electrical_engineering": 0.375, "mmlu_eval_accuracy_elementary_mathematics": 0.24390243902439024, "mmlu_eval_accuracy_formal_logic": 0.42857142857142855, "mmlu_eval_accuracy_global_facts": 0.5, "mmlu_eval_accuracy_high_school_biology": 0.375, "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182, "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556, "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556, "mmlu_eval_accuracy_high_school_geography": 0.8181818181818182, "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666, "mmlu_eval_accuracy_high_school_macroeconomics": 0.37209302325581395, "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276, "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464, "mmlu_eval_accuracy_high_school_physics": 0.4117647058823529, "mmlu_eval_accuracy_high_school_psychology": 0.7666666666666667, "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087, "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273, "mmlu_eval_accuracy_high_school_world_history": 0.5, "mmlu_eval_accuracy_human_aging": 0.6521739130434783, "mmlu_eval_accuracy_human_sexuality": 0.5, "mmlu_eval_accuracy_international_law": 0.7692307692307693, "mmlu_eval_accuracy_jurisprudence": 0.5454545454545454, "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556, "mmlu_eval_accuracy_machine_learning": 0.2727272727272727, "mmlu_eval_accuracy_management": 0.6363636363636364, "mmlu_eval_accuracy_marketing": 0.76, "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273, "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186, "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842, "mmlu_eval_accuracy_moral_scenarios": 0.25, "mmlu_eval_accuracy_nutrition": 0.5757575757575758, "mmlu_eval_accuracy_philosophy": 0.5, "mmlu_eval_accuracy_prehistory": 0.4857142857142857, "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613, "mmlu_eval_accuracy_professional_law": 0.3235294117647059, "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644, "mmlu_eval_accuracy_professional_psychology": 0.37681159420289856, "mmlu_eval_accuracy_public_relations": 0.4166666666666667, "mmlu_eval_accuracy_security_studies": 0.5185185185185185, "mmlu_eval_accuracy_sociology": 0.6818181818181818, "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454, "mmlu_eval_accuracy_virology": 0.3333333333333333, "mmlu_eval_accuracy_world_religions": 0.7368421052631579, "mmlu_loss": 1.0501697772321128, "step": 400 }, { "epoch": 0.42, "learning_rate": 0.0002, "loss": 0.6737, "step": 410 }, { "epoch": 0.43, "learning_rate": 0.0002, "loss": 0.6234, "step": 420 }, { "epoch": 0.44, "learning_rate": 0.0002, "loss": 0.6819, "step": 430 }, { "epoch": 0.45, "learning_rate": 0.0002, "loss": 
0.6338, "step": 440 }, { "epoch": 0.46, "learning_rate": 0.0002, "loss": 0.8598, "step": 450 }, { "epoch": 0.47, "learning_rate": 0.0002, "loss": 0.6242, "step": 460 }, { "epoch": 0.48, "learning_rate": 0.0002, "loss": 0.6475, "step": 470 }, { "epoch": 0.49, "learning_rate": 0.0002, "loss": 0.6648, "step": 480 }, { "epoch": 0.5, "learning_rate": 0.0002, "loss": 0.6701, "step": 490 }, { "epoch": 0.51, "learning_rate": 0.0002, "loss": 0.6111, "step": 500 }, { "epoch": 0.52, "learning_rate": 0.0002, "loss": 0.7534, "step": 510 }, { "epoch": 0.53, "learning_rate": 0.0002, "loss": 0.6295, "step": 520 }, { "epoch": 0.54, "learning_rate": 0.0002, "loss": 0.6684, "step": 530 }, { "epoch": 0.55, "learning_rate": 0.0002, "loss": 0.6345, "step": 540 }, { "epoch": 0.56, "learning_rate": 0.0002, "loss": 0.6401, "step": 550 }, { "epoch": 0.57, "learning_rate": 0.0002, "loss": 0.6682, "step": 560 }, { "epoch": 0.58, "learning_rate": 0.0002, "loss": 0.7064, "step": 570 }, { "epoch": 0.59, "learning_rate": 0.0002, "loss": 0.5483, "step": 580 }, { "epoch": 0.6, "learning_rate": 0.0002, "loss": 0.6306, "step": 590 }, { "epoch": 0.61, "learning_rate": 0.0002, "loss": 0.624, "step": 600 }, { "epoch": 0.61, "eval_loss": 0.6136035323143005, "eval_runtime": 120.795, "eval_samples_per_second": 8.278, "eval_steps_per_second": 4.139, "step": 600 }, { "epoch": 0.61, "mmlu_eval_accuracy": 0.4829167430977062, "mmlu_eval_accuracy_abstract_algebra": 0.09090909090909091, "mmlu_eval_accuracy_anatomy": 0.5714285714285714, "mmlu_eval_accuracy_astronomy": 0.5, "mmlu_eval_accuracy_business_ethics": 0.6363636363636364, "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655, "mmlu_eval_accuracy_college_biology": 0.375, "mmlu_eval_accuracy_college_chemistry": 0.375, "mmlu_eval_accuracy_college_computer_science": 0.5454545454545454, "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182, "mmlu_eval_accuracy_college_medicine": 0.36363636363636365, "mmlu_eval_accuracy_college_physics": 0.45454545454545453, "mmlu_eval_accuracy_computer_security": 0.36363636363636365, "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464, "mmlu_eval_accuracy_econometrics": 0.25, "mmlu_eval_accuracy_electrical_engineering": 0.4375, "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536, "mmlu_eval_accuracy_formal_logic": 0.2857142857142857, "mmlu_eval_accuracy_global_facts": 0.6, "mmlu_eval_accuracy_high_school_biology": 0.34375, "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091, "mmlu_eval_accuracy_high_school_computer_science": 0.7777777777777778, "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112, "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727, "mmlu_eval_accuracy_high_school_government_and_politics": 0.6190476190476191, "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256, "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276, "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156, "mmlu_eval_accuracy_high_school_physics": 0.29411764705882354, "mmlu_eval_accuracy_high_school_psychology": 0.6833333333333333, "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173, "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818, "mmlu_eval_accuracy_high_school_world_history": 0.6538461538461539, "mmlu_eval_accuracy_human_aging": 0.6956521739130435, "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667, "mmlu_eval_accuracy_international_law": 0.7692307692307693, "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365, 
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556, "mmlu_eval_accuracy_machine_learning": 0.2727272727272727, "mmlu_eval_accuracy_management": 0.6363636363636364, "mmlu_eval_accuracy_marketing": 0.8, "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273, "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186, "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316, "mmlu_eval_accuracy_moral_scenarios": 0.23, "mmlu_eval_accuracy_nutrition": 0.5757575757575758, "mmlu_eval_accuracy_philosophy": 0.5294117647058824, "mmlu_eval_accuracy_prehistory": 0.4, "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903, "mmlu_eval_accuracy_professional_law": 0.3588235294117647, "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644, "mmlu_eval_accuracy_professional_psychology": 0.36231884057971014, "mmlu_eval_accuracy_public_relations": 0.5, "mmlu_eval_accuracy_security_studies": 0.5185185185185185, "mmlu_eval_accuracy_sociology": 0.6363636363636364, "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364, "mmlu_eval_accuracy_virology": 0.4444444444444444, "mmlu_eval_accuracy_world_religions": 0.7368421052631579, "mmlu_loss": 1.063391035281647, "step": 600 }, { "epoch": 0.62, "learning_rate": 0.0002, "loss": 0.6211, "step": 610 }, { "epoch": 0.63, "learning_rate": 0.0002, "loss": 0.6347, "step": 620 }, { "epoch": 0.64, "learning_rate": 0.0002, "loss": 0.727, "step": 630 }, { "epoch": 0.65, "learning_rate": 0.0002, "loss": 0.6753, "step": 640 }, { "epoch": 0.66, "learning_rate": 0.0002, "loss": 0.674, "step": 650 }, { "epoch": 0.67, "learning_rate": 0.0002, "loss": 0.7054, "step": 660 }, { "epoch": 0.68, "learning_rate": 0.0002, "loss": 0.7221, "step": 670 }, { "epoch": 0.69, "learning_rate": 0.0002, "loss": 0.6147, "step": 680 }, { "epoch": 0.7, "learning_rate": 0.0002, "loss": 0.693, "step": 690 }, { "epoch": 0.71, "learning_rate": 0.0002, "loss": 0.6348, "step": 700 }, { "epoch": 0.72, "learning_rate": 0.0002, "loss": 0.604, "step": 710 }, { "epoch": 0.73, "learning_rate": 0.0002, "loss": 0.5798, "step": 720 }, { "epoch": 0.74, "learning_rate": 0.0002, "loss": 0.5844, "step": 730 }, { "epoch": 0.75, "learning_rate": 0.0002, "loss": 0.6679, "step": 740 }, { "epoch": 0.76, "learning_rate": 0.0002, "loss": 0.6564, "step": 750 }, { "epoch": 0.77, "learning_rate": 0.0002, "loss": 0.7336, "step": 760 }, { "epoch": 0.78, "learning_rate": 0.0002, "loss": 0.7271, "step": 770 }, { "epoch": 0.79, "learning_rate": 0.0002, "loss": 0.6606, "step": 780 }, { "epoch": 0.8, "learning_rate": 0.0002, "loss": 0.6415, "step": 790 }, { "epoch": 0.81, "learning_rate": 0.0002, "loss": 0.6775, "step": 800 }, { "epoch": 0.81, "eval_loss": 0.6129981875419617, "eval_runtime": 120.8205, "eval_samples_per_second": 8.277, "eval_steps_per_second": 4.138, "step": 800 }, { "epoch": 0.81, "mmlu_eval_accuracy": 0.46517376517531633, "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727, "mmlu_eval_accuracy_anatomy": 0.6428571428571429, "mmlu_eval_accuracy_astronomy": 0.4375, "mmlu_eval_accuracy_business_ethics": 0.6363636363636364, "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655, "mmlu_eval_accuracy_college_biology": 0.4375, "mmlu_eval_accuracy_college_chemistry": 0.0, "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365, "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727, "mmlu_eval_accuracy_college_medicine": 0.3181818181818182, "mmlu_eval_accuracy_college_physics": 0.45454545454545453, "mmlu_eval_accuracy_computer_security": 0.45454545454545453, 
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464, "mmlu_eval_accuracy_econometrics": 0.16666666666666666, "mmlu_eval_accuracy_electrical_engineering": 0.3125, "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073, "mmlu_eval_accuracy_formal_logic": 0.21428571428571427, "mmlu_eval_accuracy_global_facts": 0.5, "mmlu_eval_accuracy_high_school_biology": 0.46875, "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365, "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556, "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556, "mmlu_eval_accuracy_high_school_geography": 0.7727272727272727, "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714, "mmlu_eval_accuracy_high_school_macroeconomics": 0.37209302325581395, "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483, "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464, "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882, "mmlu_eval_accuracy_high_school_psychology": 0.7, "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173, "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273, "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384, "mmlu_eval_accuracy_human_aging": 0.6956521739130435, "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333, "mmlu_eval_accuracy_international_law": 0.7692307692307693, "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365, "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556, "mmlu_eval_accuracy_machine_learning": 0.2727272727272727, "mmlu_eval_accuracy_management": 0.6363636363636364, "mmlu_eval_accuracy_marketing": 0.72, "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273, "mmlu_eval_accuracy_miscellaneous": 0.686046511627907, "mmlu_eval_accuracy_moral_disputes": 0.42105263157894735, "mmlu_eval_accuracy_moral_scenarios": 0.25, "mmlu_eval_accuracy_nutrition": 0.5757575757575758, "mmlu_eval_accuracy_philosophy": 0.5588235294117647, "mmlu_eval_accuracy_prehistory": 0.42857142857142855, "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613, "mmlu_eval_accuracy_professional_law": 0.3588235294117647, "mmlu_eval_accuracy_professional_medicine": 0.4838709677419355, "mmlu_eval_accuracy_professional_psychology": 0.37681159420289856, "mmlu_eval_accuracy_public_relations": 0.5833333333333334, "mmlu_eval_accuracy_security_studies": 0.5185185185185185, "mmlu_eval_accuracy_sociology": 0.5454545454545454, "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273, "mmlu_eval_accuracy_virology": 0.5, "mmlu_eval_accuracy_world_religions": 0.7368421052631579, "mmlu_loss": 1.0104573983553184, "step": 800 } ], "max_steps": 5000, "num_train_epochs": 6, "total_flos": 1.01621189025792e+17, "trial_name": null, "trial_params": null }