{ "best_metric": 0.7883281707763672, "best_model_checkpoint": "data/Mistral-7B_task-2_120-samples_config-1_full/checkpoint-66", "epoch": 13.0, "eval_steps": 500, "global_step": 143, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.09090909090909091, "grad_norm": 1.1011921167373657, "learning_rate": 1.818181818181818e-06, "loss": 1.1337, "step": 1 }, { "epoch": 0.18181818181818182, "grad_norm": 1.392738699913025, "learning_rate": 3.636363636363636e-06, "loss": 1.3187, "step": 2 }, { "epoch": 0.36363636363636365, "grad_norm": 1.102772831916809, "learning_rate": 7.272727272727272e-06, "loss": 1.1577, "step": 4 }, { "epoch": 0.5454545454545454, "grad_norm": 1.1216496229171753, "learning_rate": 1.0909090909090909e-05, "loss": 1.1662, "step": 6 }, { "epoch": 0.7272727272727273, "grad_norm": 1.0292140245437622, "learning_rate": 1.4545454545454545e-05, "loss": 1.2153, "step": 8 }, { "epoch": 0.9090909090909091, "grad_norm": 0.77543705701828, "learning_rate": 1.8181818181818182e-05, "loss": 1.0983, "step": 10 }, { "epoch": 1.0, "eval_loss": 1.0808161497116089, "eval_runtime": 30.0166, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 11 }, { "epoch": 1.0909090909090908, "grad_norm": 0.6813668608665466, "learning_rate": 2.1818181818181818e-05, "loss": 1.1177, "step": 12 }, { "epoch": 1.2727272727272727, "grad_norm": 0.435611367225647, "learning_rate": 2.5454545454545454e-05, "loss": 1.0331, "step": 14 }, { "epoch": 1.4545454545454546, "grad_norm": 0.42835795879364014, "learning_rate": 2.909090909090909e-05, "loss": 1.0366, "step": 16 }, { "epoch": 1.6363636363636362, "grad_norm": 0.40383854508399963, "learning_rate": 3.272727272727273e-05, "loss": 1.047, "step": 18 }, { "epoch": 1.8181818181818183, "grad_norm": 0.37422505021095276, "learning_rate": 3.6363636363636364e-05, "loss": 0.9824, "step": 20 }, { "epoch": 2.0, "grad_norm": 0.3606886863708496, "learning_rate": 4e-05, "loss": 0.8983, "step": 22 }, { "epoch": 2.0, "eval_loss": 0.953758180141449, "eval_runtime": 30.0246, "eval_samples_per_second": 0.799, "eval_steps_per_second": 0.799, "step": 22 }, { "epoch": 2.1818181818181817, "grad_norm": 0.30870339274406433, "learning_rate": 4.3636363636363636e-05, "loss": 0.9373, "step": 24 }, { "epoch": 2.3636363636363638, "grad_norm": 0.4794676899909973, "learning_rate": 4.7272727272727275e-05, "loss": 0.919, "step": 26 }, { "epoch": 2.5454545454545454, "grad_norm": 0.39165472984313965, "learning_rate": 5.090909090909091e-05, "loss": 0.8539, "step": 28 }, { "epoch": 2.7272727272727275, "grad_norm": 0.3516630232334137, "learning_rate": 5.4545454545454546e-05, "loss": 0.8662, "step": 30 }, { "epoch": 2.909090909090909, "grad_norm": 0.3009703755378723, "learning_rate": 5.818181818181818e-05, "loss": 0.8093, "step": 32 }, { "epoch": 3.0, "eval_loss": 0.8323513865470886, "eval_runtime": 30.0119, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 33 }, { "epoch": 3.090909090909091, "grad_norm": 0.2687840759754181, "learning_rate": 6.181818181818182e-05, "loss": 0.8227, "step": 34 }, { "epoch": 3.2727272727272725, "grad_norm": 0.2438051998615265, "learning_rate": 6.545454545454546e-05, "loss": 0.7844, "step": 36 }, { "epoch": 3.4545454545454546, "grad_norm": 0.2567192316055298, "learning_rate": 6.90909090909091e-05, "loss": 0.7855, "step": 38 }, { "epoch": 3.6363636363636362, "grad_norm": 0.2168852835893631, "learning_rate": 7.272727272727273e-05, "loss": 0.7365, "step": 40 }, { "epoch": 
3.8181818181818183, "grad_norm": 0.3096124231815338, "learning_rate": 7.636363636363637e-05, "loss": 0.7191, "step": 42 }, { "epoch": 4.0, "grad_norm": 0.24166655540466309, "learning_rate": 8e-05, "loss": 0.812, "step": 44 }, { "epoch": 4.0, "eval_loss": 0.8044827580451965, "eval_runtime": 30.0135, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 44 }, { "epoch": 4.181818181818182, "grad_norm": 0.2755947411060333, "learning_rate": 8.363636363636364e-05, "loss": 0.7234, "step": 46 }, { "epoch": 4.363636363636363, "grad_norm": 0.2453605979681015, "learning_rate": 8.727272727272727e-05, "loss": 0.7506, "step": 48 }, { "epoch": 4.545454545454545, "grad_norm": 0.2451571524143219, "learning_rate": 9.090909090909092e-05, "loss": 0.722, "step": 50 }, { "epoch": 4.7272727272727275, "grad_norm": 0.3140670359134674, "learning_rate": 9.454545454545455e-05, "loss": 0.7278, "step": 52 }, { "epoch": 4.909090909090909, "grad_norm": 0.36974820494651794, "learning_rate": 9.818181818181818e-05, "loss": 0.7556, "step": 54 }, { "epoch": 5.0, "eval_loss": 0.7907748222351074, "eval_runtime": 30.0136, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 55 }, { "epoch": 5.090909090909091, "grad_norm": 0.2563894987106323, "learning_rate": 9.999899300364532e-05, "loss": 0.7018, "step": 56 }, { "epoch": 5.2727272727272725, "grad_norm": 0.2989403307437897, "learning_rate": 9.99909372761763e-05, "loss": 0.717, "step": 58 }, { "epoch": 5.454545454545454, "grad_norm": 0.32498207688331604, "learning_rate": 9.997482711915927e-05, "loss": 0.6604, "step": 60 }, { "epoch": 5.636363636363637, "grad_norm": 0.32759106159210205, "learning_rate": 9.99506651282272e-05, "loss": 0.6733, "step": 62 }, { "epoch": 5.818181818181818, "grad_norm": 0.33633723855018616, "learning_rate": 9.991845519630678e-05, "loss": 0.6688, "step": 64 }, { "epoch": 6.0, "grad_norm": 0.3074813187122345, "learning_rate": 9.987820251299122e-05, "loss": 0.7048, "step": 66 }, { "epoch": 6.0, "eval_loss": 0.7883281707763672, "eval_runtime": 30.0108, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 66 }, { "epoch": 6.181818181818182, "grad_norm": 0.3761835992336273, "learning_rate": 9.982991356370404e-05, "loss": 0.6087, "step": 68 }, { "epoch": 6.363636363636363, "grad_norm": 0.39026913046836853, "learning_rate": 9.977359612865423e-05, "loss": 0.6425, "step": 70 }, { "epoch": 6.545454545454545, "grad_norm": 0.4483635127544403, "learning_rate": 9.970925928158274e-05, "loss": 0.5776, "step": 72 }, { "epoch": 6.7272727272727275, "grad_norm": 0.47310715913772583, "learning_rate": 9.963691338830044e-05, "loss": 0.6171, "step": 74 }, { "epoch": 6.909090909090909, "grad_norm": 0.4454406797885895, "learning_rate": 9.955657010501806e-05, "loss": 0.6515, "step": 76 }, { "epoch": 7.0, "eval_loss": 0.7959274649620056, "eval_runtime": 30.0186, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 77 }, { "epoch": 7.090909090909091, "grad_norm": 0.4192354083061218, "learning_rate": 9.946824237646824e-05, "loss": 0.6557, "step": 78 }, { "epoch": 7.2727272727272725, "grad_norm": 0.5135143399238586, "learning_rate": 9.937194443381972e-05, "loss": 0.5054, "step": 80 }, { "epoch": 7.454545454545454, "grad_norm": 0.6870741844177246, "learning_rate": 9.926769179238466e-05, "loss": 0.5194, "step": 82 }, { "epoch": 7.636363636363637, "grad_norm": 0.5848849415779114, "learning_rate": 9.915550124911866e-05, "loss": 0.5496, "step": 84 }, { "epoch": 7.818181818181818, "grad_norm": 0.5785425305366516, 
"learning_rate": 9.903539087991462e-05, "loss": 0.5373, "step": 86 }, { "epoch": 8.0, "grad_norm": 0.6328079104423523, "learning_rate": 9.890738003669029e-05, "loss": 0.5883, "step": 88 }, { "epoch": 8.0, "eval_loss": 0.8303840160369873, "eval_runtime": 30.0128, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 88 }, { "epoch": 8.181818181818182, "grad_norm": 0.6976420879364014, "learning_rate": 9.877148934427037e-05, "loss": 0.505, "step": 90 }, { "epoch": 8.363636363636363, "grad_norm": 1.2223607301712036, "learning_rate": 9.862774069706346e-05, "loss": 0.4639, "step": 92 }, { "epoch": 8.545454545454545, "grad_norm": 0.7318143844604492, "learning_rate": 9.847615725553456e-05, "loss": 0.4862, "step": 94 }, { "epoch": 8.727272727272727, "grad_norm": 0.6903765201568604, "learning_rate": 9.831676344247342e-05, "loss": 0.4106, "step": 96 }, { "epoch": 8.909090909090908, "grad_norm": 0.8092580437660217, "learning_rate": 9.814958493905963e-05, "loss": 0.4745, "step": 98 }, { "epoch": 9.0, "eval_loss": 0.8685193061828613, "eval_runtime": 30.0124, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 99 }, { "epoch": 9.090909090909092, "grad_norm": 0.6964694857597351, "learning_rate": 9.797464868072488e-05, "loss": 0.4666, "step": 100 }, { "epoch": 9.272727272727273, "grad_norm": 0.9236999154090881, "learning_rate": 9.779198285281325e-05, "loss": 0.3613, "step": 102 }, { "epoch": 9.454545454545455, "grad_norm": 0.9183457493782043, "learning_rate": 9.760161688604008e-05, "loss": 0.3662, "step": 104 }, { "epoch": 9.636363636363637, "grad_norm": 0.9439888000488281, "learning_rate": 9.740358145174998e-05, "loss": 0.4229, "step": 106 }, { "epoch": 9.818181818181818, "grad_norm": 0.8765619993209839, "learning_rate": 9.719790845697533e-05, "loss": 0.3876, "step": 108 }, { "epoch": 10.0, "grad_norm": 1.483403205871582, "learning_rate": 9.698463103929542e-05, "loss": 0.3673, "step": 110 }, { "epoch": 10.0, "eval_loss": 0.927913248538971, "eval_runtime": 30.007, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 110 }, { "epoch": 10.181818181818182, "grad_norm": 0.7813490033149719, "learning_rate": 9.676378356149734e-05, "loss": 0.2983, "step": 112 }, { "epoch": 10.363636363636363, "grad_norm": 1.1128252744674683, "learning_rate": 9.653540160603956e-05, "loss": 0.3078, "step": 114 }, { "epoch": 10.545454545454545, "grad_norm": 0.850096583366394, "learning_rate": 9.629952196931901e-05, "loss": 0.3013, "step": 116 }, { "epoch": 10.727272727272727, "grad_norm": 0.9651398062705994, "learning_rate": 9.60561826557425e-05, "loss": 0.3184, "step": 118 }, { "epoch": 10.909090909090908, "grad_norm": 1.1538136005401611, "learning_rate": 9.580542287160348e-05, "loss": 0.3406, "step": 120 }, { "epoch": 11.0, "eval_loss": 0.9977002143859863, "eval_runtime": 30.0285, "eval_samples_per_second": 0.799, "eval_steps_per_second": 0.799, "step": 121 }, { "epoch": 11.090909090909092, "grad_norm": 0.910765528678894, "learning_rate": 9.554728301876526e-05, "loss": 0.2753, "step": 122 }, { "epoch": 11.272727272727273, "grad_norm": 1.1856135129928589, "learning_rate": 9.528180468815155e-05, "loss": 0.2395, "step": 124 }, { "epoch": 11.454545454545455, "grad_norm": 1.0970741510391235, "learning_rate": 9.50090306530454e-05, "loss": 0.2535, "step": 126 }, { "epoch": 11.636363636363637, "grad_norm": 0.8762866258621216, "learning_rate": 9.472900486219769e-05, "loss": 0.2833, "step": 128 }, { "epoch": 11.818181818181818, "grad_norm": 1.1564828157424927, "learning_rate": 
9.444177243274618e-05, "loss": 0.2489, "step": 130 }, { "epoch": 12.0, "grad_norm": 0.9118844270706177, "learning_rate": 9.414737964294636e-05, "loss": 0.2499, "step": 132 }, { "epoch": 12.0, "eval_loss": 1.029537558555603, "eval_runtime": 30.0146, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 132 }, { "epoch": 12.181818181818182, "grad_norm": 0.7883229851722717, "learning_rate": 9.384587392471515e-05, "loss": 0.2215, "step": 134 }, { "epoch": 12.363636363636363, "grad_norm": 1.2054721117019653, "learning_rate": 9.353730385598887e-05, "loss": 0.1921, "step": 136 }, { "epoch": 12.545454545454545, "grad_norm": 0.9445869326591492, "learning_rate": 9.322171915289635e-05, "loss": 0.2046, "step": 138 }, { "epoch": 12.727272727272727, "grad_norm": 0.9175389409065247, "learning_rate": 9.289917066174886e-05, "loss": 0.1948, "step": 140 }, { "epoch": 12.909090909090908, "grad_norm": 1.1180704832077026, "learning_rate": 9.256971035084785e-05, "loss": 0.2273, "step": 142 }, { "epoch": 13.0, "eval_loss": 1.1281015872955322, "eval_runtime": 30.0147, "eval_samples_per_second": 0.8, "eval_steps_per_second": 0.8, "step": 143 }, { "epoch": 13.0, "step": 143, "total_flos": 1.3485981883275674e+17, "train_loss": 0.618694863744549, "train_runtime": 4908.8927, "train_samples_per_second": 0.896, "train_steps_per_second": 0.112 } ], "logging_steps": 2, "max_steps": 550, "num_input_tokens_seen": 0, "num_train_epochs": 50, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 7, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.3485981883275674e+17, "train_batch_size": 1, "trial_name": null, "trial_params": null }