{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.0, "eval_steps": 500, "global_step": 100, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.02, "grad_norm": 1.148179292678833, "learning_rate": 4e-05, "loss": 0.5675, "step": 1 }, { "epoch": 0.04, "grad_norm": 1.114451289176941, "learning_rate": 8e-05, "loss": 0.6005, "step": 2 }, { "epoch": 0.06, "grad_norm": 0.8947972655296326, "learning_rate": 0.00012, "loss": 0.5333, "step": 3 }, { "epoch": 0.08, "grad_norm": 0.6874412298202515, "learning_rate": 0.00016, "loss": 0.5126, "step": 4 }, { "epoch": 0.1, "grad_norm": 0.6369613409042358, "learning_rate": 0.0002, "loss": 0.5243, "step": 5 }, { "epoch": 0.12, "grad_norm": 0.5632033944129944, "learning_rate": 0.00019994532573409262, "loss": 0.5335, "step": 6 }, { "epoch": 0.14, "grad_norm": 0.4783449172973633, "learning_rate": 0.00019978136272187747, "loss": 0.5066, "step": 7 }, { "epoch": 0.16, "grad_norm": 0.4505637586116791, "learning_rate": 0.00019950829025450114, "loss": 0.4819, "step": 8 }, { "epoch": 0.18, "grad_norm": 0.4964255690574646, "learning_rate": 0.00019912640693269752, "loss": 0.4949, "step": 9 }, { "epoch": 0.2, "grad_norm": 0.48732897639274597, "learning_rate": 0.00019863613034027224, "loss": 0.5023, "step": 10 }, { "epoch": 0.22, "grad_norm": 0.46126267313957214, "learning_rate": 0.00019803799658748094, "loss": 0.4869, "step": 11 }, { "epoch": 0.24, "grad_norm": 0.4295591711997986, "learning_rate": 0.0001973326597248006, "loss": 0.4706, "step": 12 }, { "epoch": 0.26, "grad_norm": 0.4603804647922516, "learning_rate": 0.00019652089102773488, "loss": 0.4861, "step": 13 }, { "epoch": 0.28, "grad_norm": 0.43054094910621643, "learning_rate": 0.00019560357815343577, "loss": 0.5147, "step": 14 }, { "epoch": 0.3, "grad_norm": 0.41392526030540466, "learning_rate": 0.00019458172417006347, "loss": 0.4657, "step": 15 }, { "epoch": 0.32, "grad_norm": 0.3919243812561035, "learning_rate": 0.0001934564464599461, "loss": 0.4649, "step": 16 }, { "epoch": 0.34, "grad_norm": 0.38946983218193054, "learning_rate": 0.00019222897549773848, "loss": 0.4626, "step": 17 }, { "epoch": 0.36, "grad_norm": 0.3812195956707001, "learning_rate": 0.00019090065350491626, "loss": 0.4537, "step": 18 }, { "epoch": 0.38, "grad_norm": 0.37084001302719116, "learning_rate": 0.00018947293298207635, "loss": 0.4489, "step": 19 }, { "epoch": 0.4, "grad_norm": 0.40874817967414856, "learning_rate": 0.0001879473751206489, "loss": 0.4583, "step": 20 }, { "epoch": 0.42, "grad_norm": 0.38442370295524597, "learning_rate": 0.00018632564809575742, "loss": 0.4279, "step": 21 }, { "epoch": 0.44, "grad_norm": 0.3699128329753876, "learning_rate": 0.00018460952524209355, "loss": 0.4404, "step": 22 }, { "epoch": 0.46, "grad_norm": 0.40334978699684143, "learning_rate": 0.00018280088311480201, "loss": 0.4448, "step": 23 }, { "epoch": 0.48, "grad_norm": 0.3896102011203766, "learning_rate": 0.00018090169943749476, "loss": 0.4421, "step": 24 }, { "epoch": 0.5, "grad_norm": 0.3784579336643219, "learning_rate": 0.00017891405093963938, "loss": 0.4538, "step": 25 }, { "epoch": 0.52, "grad_norm": 0.3841725289821625, "learning_rate": 0.00017684011108568592, "loss": 0.4251, "step": 26 }, { "epoch": 0.54, "grad_norm": 0.3888923227787018, "learning_rate": 0.0001746821476984154, "loss": 0.3996, "step": 27 }, { "epoch": 0.56, "grad_norm": 0.40714430809020996, "learning_rate": 0.00017244252047910892, "loss": 0.4298, "step": 28 }, { "epoch": 0.58, "grad_norm": 
0.395693838596344, "learning_rate": 0.00017012367842724887, "loss": 0.4044, "step": 29 }, { "epoch": 0.6, "grad_norm": 0.3896782398223877, "learning_rate": 0.00016772815716257412, "loss": 0.4093, "step": 30 }, { "epoch": 0.62, "grad_norm": 0.3841933310031891, "learning_rate": 0.00016525857615241687, "loss": 0.4139, "step": 31 }, { "epoch": 0.64, "grad_norm": 0.3779061436653137, "learning_rate": 0.0001627176358473537, "loss": 0.3962, "step": 32 }, { "epoch": 0.66, "grad_norm": 0.4033326208591461, "learning_rate": 0.00016010811472830252, "loss": 0.3883, "step": 33 }, { "epoch": 0.68, "grad_norm": 0.3781256079673767, "learning_rate": 0.00015743286626829437, "loss": 0.3858, "step": 34 }, { "epoch": 0.7, "grad_norm": 0.37716230750083923, "learning_rate": 0.00015469481581224272, "loss": 0.3938, "step": 35 }, { "epoch": 0.72, "grad_norm": 0.395526260137558, "learning_rate": 0.00015189695737812152, "loss": 0.4122, "step": 36 }, { "epoch": 0.74, "grad_norm": 0.368266224861145, "learning_rate": 0.00014904235038305083, "loss": 0.403, "step": 37 }, { "epoch": 0.76, "grad_norm": 0.37806516885757446, "learning_rate": 0.0001461341162978688, "loss": 0.374, "step": 38 }, { "epoch": 0.78, "grad_norm": 0.37161675095558167, "learning_rate": 0.00014317543523384928, "loss": 0.3792, "step": 39 }, { "epoch": 0.8, "grad_norm": 0.3814436197280884, "learning_rate": 0.00014016954246529696, "loss": 0.3824, "step": 40 }, { "epoch": 0.82, "grad_norm": 0.3864171504974365, "learning_rate": 0.00013711972489182208, "loss": 0.3693, "step": 41 }, { "epoch": 0.84, "grad_norm": 0.35850077867507935, "learning_rate": 0.00013402931744416433, "loss": 0.3623, "step": 42 }, { "epoch": 0.86, "grad_norm": 0.36775171756744385, "learning_rate": 0.00013090169943749476, "loss": 0.3643, "step": 43 }, { "epoch": 0.88, "grad_norm": 0.37669485807418823, "learning_rate": 0.00012774029087618446, "loss": 0.3756, "step": 44 }, { "epoch": 0.9, "grad_norm": 0.35857564210891724, "learning_rate": 0.00012454854871407994, "loss": 0.3947, "step": 45 }, { "epoch": 0.92, "grad_norm": 0.3690029978752136, "learning_rate": 0.0001213299630743747, "loss": 0.3548, "step": 46 }, { "epoch": 0.94, "grad_norm": 0.3639805018901825, "learning_rate": 0.000118088053433211, "loss": 0.3627, "step": 47 }, { "epoch": 0.96, "grad_norm": 0.37132132053375244, "learning_rate": 0.0001148263647711842, "loss": 0.3668, "step": 48 }, { "epoch": 0.98, "grad_norm": 0.37001127004623413, "learning_rate": 0.00011154846369695863, "loss": 0.3687, "step": 49 }, { "epoch": 1.0, "grad_norm": 0.3796997368335724, "learning_rate": 0.00010825793454723325, "loss": 0.3819, "step": 50 }, { "epoch": 1.02, "grad_norm": 0.29557618498802185, "learning_rate": 0.00010495837546732224, "loss": 0.2758, "step": 51 }, { "epoch": 1.04, "grad_norm": 0.31048524379730225, "learning_rate": 0.00010165339447663587, "loss": 0.2548, "step": 52 }, { "epoch": 1.06, "grad_norm": 0.3087123930454254, "learning_rate": 9.834660552336415e-05, "loss": 0.2647, "step": 53 }, { "epoch": 1.08, "grad_norm": 0.3230942487716675, "learning_rate": 9.504162453267777e-05, "loss": 0.2571, "step": 54 }, { "epoch": 1.1, "grad_norm": 0.3078308701515198, "learning_rate": 9.174206545276677e-05, "loss": 0.247, "step": 55 }, { "epoch": 1.12, "grad_norm": 0.3151338994503021, "learning_rate": 8.845153630304139e-05, "loss": 0.2649, "step": 56 }, { "epoch": 1.1400000000000001, "grad_norm": 0.32113245129585266, "learning_rate": 8.517363522881579e-05, "loss": 0.2441, "step": 57 }, { "epoch": 1.16, "grad_norm": 0.3001263737678528, "learning_rate": 
8.191194656678904e-05, "loss": 0.2545, "step": 58 }, { "epoch": 1.18, "grad_norm": 0.31521672010421753, "learning_rate": 7.867003692562534e-05, "loss": 0.25, "step": 59 }, { "epoch": 1.2, "grad_norm": 0.3146115839481354, "learning_rate": 7.54514512859201e-05, "loss": 0.2758, "step": 60 }, { "epoch": 1.22, "grad_norm": 0.30817195773124695, "learning_rate": 7.225970912381556e-05, "loss": 0.2639, "step": 61 }, { "epoch": 1.24, "grad_norm": 0.3167981505393982, "learning_rate": 6.909830056250527e-05, "loss": 0.2523, "step": 62 }, { "epoch": 1.26, "grad_norm": 0.33051297068595886, "learning_rate": 6.59706825558357e-05, "loss": 0.2478, "step": 63 }, { "epoch": 1.28, "grad_norm": 0.32061681151390076, "learning_rate": 6.28802751081779e-05, "loss": 0.2292, "step": 64 }, { "epoch": 1.3, "grad_norm": 0.32867640256881714, "learning_rate": 5.983045753470308e-05, "loss": 0.2506, "step": 65 }, { "epoch": 1.32, "grad_norm": 0.32115456461906433, "learning_rate": 5.6824564766150726e-05, "loss": 0.2514, "step": 66 }, { "epoch": 1.34, "grad_norm": 0.3387518525123596, "learning_rate": 5.386588370213124e-05, "loss": 0.244, "step": 67 }, { "epoch": 1.3599999999999999, "grad_norm": 0.32781389355659485, "learning_rate": 5.095764961694922e-05, "loss": 0.2424, "step": 68 }, { "epoch": 1.38, "grad_norm": 0.3345761299133301, "learning_rate": 4.810304262187852e-05, "loss": 0.2513, "step": 69 }, { "epoch": 1.4, "grad_norm": 0.3255421817302704, "learning_rate": 4.530518418775733e-05, "loss": 0.2456, "step": 70 }, { "epoch": 1.42, "grad_norm": 0.3354507088661194, "learning_rate": 4.256713373170564e-05, "loss": 0.2457, "step": 71 }, { "epoch": 1.44, "grad_norm": 0.3373594284057617, "learning_rate": 3.9891885271697496e-05, "loss": 0.2475, "step": 72 }, { "epoch": 1.46, "grad_norm": 0.32666316628456116, "learning_rate": 3.7282364152646297e-05, "loss": 0.2301, "step": 73 }, { "epoch": 1.48, "grad_norm": 0.3279685974121094, "learning_rate": 3.4741423847583134e-05, "loss": 0.2472, "step": 74 }, { "epoch": 1.5, "grad_norm": 0.3301085829734802, "learning_rate": 3.227184283742591e-05, "loss": 0.2294, "step": 75 }, { "epoch": 1.52, "grad_norm": 0.33756864070892334, "learning_rate": 2.9876321572751144e-05, "loss": 0.2319, "step": 76 }, { "epoch": 1.54, "grad_norm": 0.3326968848705292, "learning_rate": 2.7557479520891104e-05, "loss": 0.2435, "step": 77 }, { "epoch": 1.56, "grad_norm": 0.3414022624492645, "learning_rate": 2.5317852301584643e-05, "loss": 0.2365, "step": 78 }, { "epoch": 1.58, "grad_norm": 0.3446585237979889, "learning_rate": 2.315988891431412e-05, "loss": 0.2353, "step": 79 }, { "epoch": 1.6, "grad_norm": 0.33243560791015625, "learning_rate": 2.1085949060360654e-05, "loss": 0.2604, "step": 80 }, { "epoch": 1.62, "grad_norm": 0.3418924808502197, "learning_rate": 1.9098300562505266e-05, "loss": 0.2546, "step": 81 }, { "epoch": 1.6400000000000001, "grad_norm": 0.33969858288764954, "learning_rate": 1.7199116885197995e-05, "loss": 0.2546, "step": 82 }, { "epoch": 1.6600000000000001, "grad_norm": 0.3274635076522827, "learning_rate": 1.5390474757906446e-05, "loss": 0.2336, "step": 83 }, { "epoch": 1.6800000000000002, "grad_norm": 0.3252343237400055, "learning_rate": 1.3674351904242611e-05, "loss": 0.2307, "step": 84 }, { "epoch": 1.7, "grad_norm": 0.3318844735622406, "learning_rate": 1.2052624879351104e-05, "loss": 0.2273, "step": 85 }, { "epoch": 1.72, "grad_norm": 0.3397023677825928, "learning_rate": 1.0527067017923654e-05, "loss": 0.237, "step": 86 }, { "epoch": 1.74, "grad_norm": 0.32320481538772583, "learning_rate": 
9.09934649508375e-06, "loss": 0.24, "step": 87 }, { "epoch": 1.76, "grad_norm": 0.34301823377609253, "learning_rate": 7.771024502261526e-06, "loss": 0.2447, "step": 88 }, { "epoch": 1.78, "grad_norm": 0.33157962560653687, "learning_rate": 6.543553540053926e-06, "loss": 0.2512, "step": 89 }, { "epoch": 1.8, "grad_norm": 0.33687952160835266, "learning_rate": 5.418275829936537e-06, "loss": 0.2538, "step": 90 }, { "epoch": 1.8199999999999998, "grad_norm": 0.3404587209224701, "learning_rate": 4.3964218465642355e-06, "loss": 0.258, "step": 91 }, { "epoch": 1.8399999999999999, "grad_norm": 0.3458779752254486, "learning_rate": 3.4791089722651436e-06, "loss": 0.2392, "step": 92 }, { "epoch": 1.8599999999999999, "grad_norm": 0.33880794048309326, "learning_rate": 2.667340275199426e-06, "loss": 0.2542, "step": 93 }, { "epoch": 1.88, "grad_norm": 0.3243839144706726, "learning_rate": 1.9620034125190644e-06, "loss": 0.2371, "step": 94 }, { "epoch": 1.9, "grad_norm": 0.3197105824947357, "learning_rate": 1.3638696597277679e-06, "loss": 0.2237, "step": 95 }, { "epoch": 1.92, "grad_norm": 0.32444778084754944, "learning_rate": 8.735930673024806e-07, "loss": 0.2331, "step": 96 }, { "epoch": 1.94, "grad_norm": 0.3440603017807007, "learning_rate": 4.917097454988584e-07, "loss": 0.257, "step": 97 }, { "epoch": 1.96, "grad_norm": 0.3220385015010834, "learning_rate": 2.1863727812254653e-07, "loss": 0.2357, "step": 98 }, { "epoch": 1.98, "grad_norm": 0.3219694495201111, "learning_rate": 5.467426590739511e-08, "loss": 0.2483, "step": 99 }, { "epoch": 2.0, "grad_norm": 0.3302796185016632, "learning_rate": 0.0, "loss": 0.2355, "step": 100 } ], "logging_steps": 1, "max_steps": 100, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.5112362971367014e+17, "train_batch_size": 1, "trial_name": null, "trial_params": null }