{
"best_metric": 0.8252214193344116,
"best_model_checkpoint": "./output_v2/7b_cluster015_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_015/checkpoint-600",
"epoch": 0.4022795843110962,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 0.969,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 0.0002,
"loss": 0.9032,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 0.0002,
"loss": 0.81,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 0.876,
"step": 40
},
{
"epoch": 0.03,
"learning_rate": 0.0002,
"loss": 0.8858,
"step": 50
},
{
"epoch": 0.04,
"learning_rate": 0.0002,
"loss": 0.8608,
"step": 60
},
{
"epoch": 0.05,
"learning_rate": 0.0002,
"loss": 0.846,
"step": 70
},
{
"epoch": 0.05,
"learning_rate": 0.0002,
"loss": 0.8466,
"step": 80
},
{
"epoch": 0.06,
"learning_rate": 0.0002,
"loss": 0.8456,
"step": 90
},
{
"epoch": 0.07,
"learning_rate": 0.0002,
"loss": 0.8895,
"step": 100
},
{
"epoch": 0.07,
"learning_rate": 0.0002,
"loss": 0.862,
"step": 110
},
{
"epoch": 0.08,
"learning_rate": 0.0002,
"loss": 0.8193,
"step": 120
},
{
"epoch": 0.09,
"learning_rate": 0.0002,
"loss": 0.8588,
"step": 130
},
{
"epoch": 0.09,
"learning_rate": 0.0002,
"loss": 0.8516,
"step": 140
},
{
"epoch": 0.1,
"learning_rate": 0.0002,
"loss": 0.8428,
"step": 150
},
{
"epoch": 0.11,
"learning_rate": 0.0002,
"loss": 0.8829,
"step": 160
},
{
"epoch": 0.11,
"learning_rate": 0.0002,
"loss": 0.882,
"step": 170
},
{
"epoch": 0.12,
"learning_rate": 0.0002,
"loss": 0.8054,
"step": 180
},
{
"epoch": 0.13,
"learning_rate": 0.0002,
"loss": 0.8673,
"step": 190
},
{
"epoch": 0.13,
"learning_rate": 0.0002,
"loss": 0.8389,
"step": 200
},
{
"epoch": 0.13,
"eval_loss": 0.8394724130630493,
"eval_runtime": 191.2112,
"eval_samples_per_second": 5.23,
"eval_steps_per_second": 2.615,
"step": 200
},
{
"epoch": 0.13,
"mmlu_eval_accuracy": 0.4626311671628311,
"mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
"mmlu_eval_accuracy_anatomy": 0.5,
"mmlu_eval_accuracy_astronomy": 0.375,
"mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
"mmlu_eval_accuracy_college_biology": 0.375,
"mmlu_eval_accuracy_college_chemistry": 0.125,
"mmlu_eval_accuracy_college_computer_science": 0.5454545454545454,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.5,
"mmlu_eval_accuracy_elementary_mathematics": 0.3902439024390244,
"mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.34375,
"mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
"mmlu_eval_accuracy_high_school_computer_science": 0.7777777777777778,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.3023255813953488,
"mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
"mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
"mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
"mmlu_eval_accuracy_high_school_psychology": 0.7166666666666667,
"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
"mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
"mmlu_eval_accuracy_high_school_world_history": 0.5,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.6363636363636364,
"mmlu_eval_accuracy_marketing": 0.72,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.6976744186046512,
"mmlu_eval_accuracy_moral_disputes": 0.5,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.5454545454545454,
"mmlu_eval_accuracy_philosophy": 0.5294117647058824,
"mmlu_eval_accuracy_prehistory": 0.42857142857142855,
"mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
"mmlu_eval_accuracy_professional_law": 0.3235294117647059,
"mmlu_eval_accuracy_professional_medicine": 0.3870967741935484,
"mmlu_eval_accuracy_professional_psychology": 0.4057971014492754,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.6363636363636364,
"mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
"mmlu_eval_accuracy_virology": 0.4444444444444444,
"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
"mmlu_loss": 1.0846105751924353,
"step": 200
},
{
"epoch": 0.14,
"learning_rate": 0.0002,
"loss": 0.9027,
"step": 210
},
{
"epoch": 0.15,
"learning_rate": 0.0002,
"loss": 0.8621,
"step": 220
},
{
"epoch": 0.15,
"learning_rate": 0.0002,
"loss": 0.8405,
"step": 230
},
{
"epoch": 0.16,
"learning_rate": 0.0002,
"loss": 0.8553,
"step": 240
},
{
"epoch": 0.17,
"learning_rate": 0.0002,
"loss": 0.8334,
"step": 250
},
{
"epoch": 0.17,
"learning_rate": 0.0002,
"loss": 0.8791,
"step": 260
},
{
"epoch": 0.18,
"learning_rate": 0.0002,
"loss": 0.8607,
"step": 270
},
{
"epoch": 0.19,
"learning_rate": 0.0002,
"loss": 0.8403,
"step": 280
},
{
"epoch": 0.19,
"learning_rate": 0.0002,
"loss": 0.8471,
"step": 290
},
{
"epoch": 0.2,
"learning_rate": 0.0002,
"loss": 0.8945,
"step": 300
},
{
"epoch": 0.21,
"learning_rate": 0.0002,
"loss": 0.8094,
"step": 310
},
{
"epoch": 0.21,
"learning_rate": 0.0002,
"loss": 0.8571,
"step": 320
},
{
"epoch": 0.22,
"learning_rate": 0.0002,
"loss": 0.8469,
"step": 330
},
{
"epoch": 0.23,
"learning_rate": 0.0002,
"loss": 0.8609,
"step": 340
},
{
"epoch": 0.23,
"learning_rate": 0.0002,
"loss": 0.8242,
"step": 350
},
{
"epoch": 0.24,
"learning_rate": 0.0002,
"loss": 0.8679,
"step": 360
},
{
"epoch": 0.25,
"learning_rate": 0.0002,
"loss": 0.8583,
"step": 370
},
{
"epoch": 0.25,
"learning_rate": 0.0002,
"loss": 0.8815,
"step": 380
},
{
"epoch": 0.26,
"learning_rate": 0.0002,
"loss": 0.819,
"step": 390
},
{
"epoch": 0.27,
"learning_rate": 0.0002,
"loss": 0.8946,
"step": 400
},
{
"epoch": 0.27,
"eval_loss": 0.8304864764213562,
"eval_runtime": 191.0197,
"eval_samples_per_second": 5.235,
"eval_steps_per_second": 2.618,
"step": 400
},
{
"epoch": 0.27,
"mmlu_eval_accuracy": 0.45184631712481954,
"mmlu_eval_accuracy_abstract_algebra": 0.18181818181818182,
"mmlu_eval_accuracy_anatomy": 0.5,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.3793103448275862,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.125,
"mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.46153846153846156,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.5,
"mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
"mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
"mmlu_eval_accuracy_global_facts": 0.4,
"mmlu_eval_accuracy_high_school_biology": 0.34375,
"mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
"mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.3023255813953488,
"mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
"mmlu_eval_accuracy_high_school_physics": 0.29411764705882354,
"mmlu_eval_accuracy_high_school_psychology": 0.7,
"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
"mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
"mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
"mmlu_eval_accuracy_human_aging": 0.7391304347826086,
"mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
"mmlu_eval_accuracy_international_law": 0.7692307692307693,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.45454545454545453,
"mmlu_eval_accuracy_marketing": 0.72,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
"mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.6060606060606061,
"mmlu_eval_accuracy_philosophy": 0.5588235294117647,
"mmlu_eval_accuracy_prehistory": 0.4,
"mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
"mmlu_eval_accuracy_professional_law": 0.3176470588235294,
"mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
"mmlu_eval_accuracy_professional_psychology": 0.391304347826087,
"mmlu_eval_accuracy_public_relations": 0.5,
"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
"mmlu_eval_accuracy_sociology": 0.5909090909090909,
"mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
"mmlu_eval_accuracy_virology": 0.4444444444444444,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.1180029823286726,
"step": 400
},
{
"epoch": 0.27,
"learning_rate": 0.0002,
"loss": 0.8577,
"step": 410
},
{
"epoch": 0.28,
"learning_rate": 0.0002,
"loss": 0.8594,
"step": 420
},
{
"epoch": 0.29,
"learning_rate": 0.0002,
"loss": 0.8559,
"step": 430
},
{
"epoch": 0.3,
"learning_rate": 0.0002,
"loss": 0.8602,
"step": 440
},
{
"epoch": 0.3,
"learning_rate": 0.0002,
"loss": 0.8196,
"step": 450
},
{
"epoch": 0.31,
"learning_rate": 0.0002,
"loss": 0.8601,
"step": 460
},
{
"epoch": 0.32,
"learning_rate": 0.0002,
"loss": 0.8412,
"step": 470
},
{
"epoch": 0.32,
"learning_rate": 0.0002,
"loss": 0.8543,
"step": 480
},
{
"epoch": 0.33,
"learning_rate": 0.0002,
"loss": 0.8705,
"step": 490
},
{
"epoch": 0.34,
"learning_rate": 0.0002,
"loss": 0.7979,
"step": 500
},
{
"epoch": 0.34,
"learning_rate": 0.0002,
"loss": 0.8179,
"step": 510
},
{
"epoch": 0.35,
"learning_rate": 0.0002,
"loss": 0.8842,
"step": 520
},
{
"epoch": 0.36,
"learning_rate": 0.0002,
"loss": 0.7691,
"step": 530
},
{
"epoch": 0.36,
"learning_rate": 0.0002,
"loss": 0.8867,
"step": 540
},
{
"epoch": 0.37,
"learning_rate": 0.0002,
"loss": 0.8812,
"step": 550
},
{
"epoch": 0.38,
"learning_rate": 0.0002,
"loss": 0.8507,
"step": 560
},
{
"epoch": 0.38,
"learning_rate": 0.0002,
"loss": 0.8627,
"step": 570
},
{
"epoch": 0.39,
"learning_rate": 0.0002,
"loss": 0.8451,
"step": 580
},
{
"epoch": 0.4,
"learning_rate": 0.0002,
"loss": 0.8396,
"step": 590
},
{
"epoch": 0.4,
"learning_rate": 0.0002,
"loss": 0.8756,
"step": 600
},
{
"epoch": 0.4,
"eval_loss": 0.8252214193344116,
"eval_runtime": 190.9921,
"eval_samples_per_second": 5.236,
"eval_steps_per_second": 2.618,
"step": 600
},
{
"epoch": 0.4,
"mmlu_eval_accuracy": 0.46243828535426434,
"mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
"mmlu_eval_accuracy_anatomy": 0.5714285714285714,
"mmlu_eval_accuracy_astronomy": 0.4375,
"mmlu_eval_accuracy_business_ethics": 0.45454545454545453,
"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
"mmlu_eval_accuracy_college_biology": 0.4375,
"mmlu_eval_accuracy_college_chemistry": 0.25,
"mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
"mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
"mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
"mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
"mmlu_eval_accuracy_econometrics": 0.16666666666666666,
"mmlu_eval_accuracy_electrical_engineering": 0.4375,
"mmlu_eval_accuracy_elementary_mathematics": 0.4146341463414634,
"mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
"mmlu_eval_accuracy_global_facts": 0.5,
"mmlu_eval_accuracy_high_school_biology": 0.34375,
"mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
"mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
"mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
"mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
"mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
"mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
"mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
"mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
"mmlu_eval_accuracy_high_school_psychology": 0.7333333333333333,
"mmlu_eval_accuracy_high_school_statistics": 0.21739130434782608,
"mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
"mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
"mmlu_eval_accuracy_human_aging": 0.6956521739130435,
"mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
"mmlu_eval_accuracy_international_law": 0.6923076923076923,
"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
"mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
"mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
"mmlu_eval_accuracy_management": 0.45454545454545453,
"mmlu_eval_accuracy_marketing": 0.72,
"mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
"mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
"mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
"mmlu_eval_accuracy_moral_scenarios": 0.24,
"mmlu_eval_accuracy_nutrition": 0.5757575757575758,
"mmlu_eval_accuracy_philosophy": 0.5,
"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
"mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
"mmlu_eval_accuracy_professional_law": 0.3058823529411765,
"mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
"mmlu_eval_accuracy_professional_psychology": 0.37681159420289856,
"mmlu_eval_accuracy_public_relations": 0.5833333333333334,
"mmlu_eval_accuracy_security_studies": 0.5555555555555556,
"mmlu_eval_accuracy_sociology": 0.7272727272727273,
"mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
"mmlu_eval_accuracy_virology": 0.4444444444444444,
"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
"mmlu_loss": 1.1610933113113706,
"step": 600
}
],
"max_steps": 5000,
"num_train_epochs": 4,
"total_flos": 1.300127813027758e+17,
"trial_name": null,
"trial_params": null
}