{ "best_metric": null, "best_model_checkpoint": null, "epoch": 4.745762711864407, "eval_steps": 500, "global_step": 70, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.06779661016949153, "grad_norm": 3.306018590927124, "learning_rate": 9.999125804563732e-05, "loss": 2.1358, "num_input_tokens_seen": 10896, "step": 1 }, { "epoch": 0.13559322033898305, "grad_norm": 2.0162065029144287, "learning_rate": 9.996503523941994e-05, "loss": 2.3575, "num_input_tokens_seen": 20512, "step": 2 }, { "epoch": 0.2033898305084746, "grad_norm": 2.1740899085998535, "learning_rate": 9.992134075089084e-05, "loss": 1.4712, "num_input_tokens_seen": 29984, "step": 3 }, { "epoch": 0.2711864406779661, "grad_norm": 2.0339269638061523, "learning_rate": 9.986018985905901e-05, "loss": 2.3124, "num_input_tokens_seen": 39760, "step": 4 }, { "epoch": 0.3389830508474576, "grad_norm": 1.0339545011520386, "learning_rate": 9.978160394705668e-05, "loss": 1.8062, "num_input_tokens_seen": 49856, "step": 5 }, { "epoch": 0.4067796610169492, "grad_norm": 0.6279817819595337, "learning_rate": 9.968561049466214e-05, "loss": 2.0499, "num_input_tokens_seen": 60208, "step": 6 }, { "epoch": 0.4745762711864407, "grad_norm": 0.5110039114952087, "learning_rate": 9.957224306869053e-05, "loss": 1.1603, "num_input_tokens_seen": 70816, "step": 7 }, { "epoch": 0.5423728813559322, "grad_norm": 0.8199847936630249, "learning_rate": 9.944154131125642e-05, "loss": 1.9923, "num_input_tokens_seen": 79968, "step": 8 }, { "epoch": 0.6101694915254238, "grad_norm": 0.729006290435791, "learning_rate": 9.92935509259118e-05, "loss": 1.9688, "num_input_tokens_seen": 89472, "step": 9 }, { "epoch": 0.6779661016949152, "grad_norm": 1.2717365026474, "learning_rate": 9.912832366166442e-05, "loss": 2.2878, "num_input_tokens_seen": 100336, "step": 10 }, { "epoch": 0.7457627118644068, "grad_norm": 0.6771590113639832, "learning_rate": 9.894591729488242e-05, "loss": 2.001, "num_input_tokens_seen": 110640, "step": 11 }, { "epoch": 0.8135593220338984, "grad_norm": 0.4566394090652466, "learning_rate": 9.874639560909117e-05, "loss": 1.8458, "num_input_tokens_seen": 120304, "step": 12 }, { "epoch": 0.8813559322033898, "grad_norm": 0.6037254929542542, "learning_rate": 9.852982837266955e-05, "loss": 1.8775, "num_input_tokens_seen": 128288, "step": 13 }, { "epoch": 0.9491525423728814, "grad_norm": 0.6338982582092285, "learning_rate": 9.829629131445342e-05, "loss": 1.9309, "num_input_tokens_seen": 138624, "step": 14 }, { "epoch": 1.0169491525423728, "grad_norm": 0.5118916630744934, "learning_rate": 9.804586609725499e-05, "loss": 1.5507, "num_input_tokens_seen": 148208, "step": 15 }, { "epoch": 1.0847457627118644, "grad_norm": 0.4119274914264679, "learning_rate": 9.777864028930705e-05, "loss": 1.6802, "num_input_tokens_seen": 159200, "step": 16 }, { "epoch": 1.152542372881356, "grad_norm": 0.5065487623214722, "learning_rate": 9.74947073336423e-05, "loss": 1.5648, "num_input_tokens_seen": 166544, "step": 17 }, { "epoch": 1.2203389830508475, "grad_norm": 0.47032174468040466, "learning_rate": 9.719416651541839e-05, "loss": 1.3598, "num_input_tokens_seen": 177120, "step": 18 }, { "epoch": 1.288135593220339, "grad_norm": 0.38840025663375854, "learning_rate": 9.687712292719997e-05, "loss": 1.3238, "num_input_tokens_seen": 186272, "step": 19 }, { "epoch": 1.3559322033898304, "grad_norm": 0.5947024822235107, "learning_rate": 9.654368743221022e-05, "loss": 1.8704, "num_input_tokens_seen": 196480, "step": 20 }, { 
"epoch": 1.423728813559322, "grad_norm": 0.5424012541770935, "learning_rate": 9.619397662556435e-05, "loss": 1.8205, "num_input_tokens_seen": 203856, "step": 21 }, { "epoch": 1.4915254237288136, "grad_norm": 0.45230892300605774, "learning_rate": 9.582811279349882e-05, "loss": 1.3595, "num_input_tokens_seen": 214512, "step": 22 }, { "epoch": 1.559322033898305, "grad_norm": 0.41904568672180176, "learning_rate": 9.544622387061055e-05, "loss": 2.0797, "num_input_tokens_seen": 223792, "step": 23 }, { "epoch": 1.6271186440677967, "grad_norm": 0.5474861264228821, "learning_rate": 9.504844339512095e-05, "loss": 2.15, "num_input_tokens_seen": 233776, "step": 24 }, { "epoch": 1.694915254237288, "grad_norm": 0.4554285407066345, "learning_rate": 9.463491046218058e-05, "loss": 2.0059, "num_input_tokens_seen": 243600, "step": 25 }, { "epoch": 1.7627118644067796, "grad_norm": 0.4554867744445801, "learning_rate": 9.420576967523049e-05, "loss": 1.2697, "num_input_tokens_seen": 254608, "step": 26 }, { "epoch": 1.8305084745762712, "grad_norm": 0.44335854053497314, "learning_rate": 9.376117109543769e-05, "loss": 1.6259, "num_input_tokens_seen": 265728, "step": 27 }, { "epoch": 1.8983050847457628, "grad_norm": 0.36280569434165955, "learning_rate": 9.330127018922194e-05, "loss": 1.0618, "num_input_tokens_seen": 276624, "step": 28 }, { "epoch": 1.9661016949152543, "grad_norm": 0.4373883306980133, "learning_rate": 9.282622777389258e-05, "loss": 1.8557, "num_input_tokens_seen": 287072, "step": 29 }, { "epoch": 2.0338983050847457, "grad_norm": 0.5854040384292603, "learning_rate": 9.233620996141421e-05, "loss": 1.8077, "num_input_tokens_seen": 296248, "step": 30 }, { "epoch": 2.1016949152542375, "grad_norm": 0.5168242454528809, "learning_rate": 9.183138810032099e-05, "loss": 1.767, "num_input_tokens_seen": 305560, "step": 31 }, { "epoch": 2.169491525423729, "grad_norm": 0.4406554698944092, "learning_rate": 9.131193871579975e-05, "loss": 1.5561, "num_input_tokens_seen": 317176, "step": 32 }, { "epoch": 2.23728813559322, "grad_norm": 0.48960959911346436, "learning_rate": 9.077804344796302e-05, "loss": 1.7631, "num_input_tokens_seen": 327352, "step": 33 }, { "epoch": 2.305084745762712, "grad_norm": 0.4791744351387024, "learning_rate": 9.022988898833342e-05, "loss": 1.5754, "num_input_tokens_seen": 337384, "step": 34 }, { "epoch": 2.3728813559322033, "grad_norm": 0.42596694827079773, "learning_rate": 8.966766701456177e-05, "loss": 1.4501, "num_input_tokens_seen": 346952, "step": 35 }, { "epoch": 2.440677966101695, "grad_norm": 0.46600142121315, "learning_rate": 8.90915741234015e-05, "loss": 1.8493, "num_input_tokens_seen": 356568, "step": 36 }, { "epoch": 2.5084745762711864, "grad_norm": 0.49377578496932983, "learning_rate": 8.850181176196315e-05, "loss": 1.6969, "num_input_tokens_seen": 366504, "step": 37 }, { "epoch": 2.576271186440678, "grad_norm": 0.42628544569015503, "learning_rate": 8.789858615727265e-05, "loss": 1.3998, "num_input_tokens_seen": 377512, "step": 38 }, { "epoch": 2.6440677966101696, "grad_norm": 0.37030258774757385, "learning_rate": 8.728210824415827e-05, "loss": 1.1354, "num_input_tokens_seen": 388728, "step": 39 }, { "epoch": 2.711864406779661, "grad_norm": 0.763684093952179, "learning_rate": 8.665259359149132e-05, "loss": 1.5526, "num_input_tokens_seen": 393800, "step": 40 }, { "epoch": 2.7796610169491527, "grad_norm": 0.5274456143379211, "learning_rate": 8.601026232680634e-05, "loss": 1.671, "num_input_tokens_seen": 404072, "step": 41 }, { "epoch": 2.847457627118644, "grad_norm": 
0.4358402192592621, "learning_rate": 8.535533905932738e-05, "loss": 1.1851, "num_input_tokens_seen": 415336, "step": 42 }, { "epoch": 2.915254237288136, "grad_norm": 0.6406439542770386, "learning_rate": 8.468805280142709e-05, "loss": 1.8113, "num_input_tokens_seen": 425288, "step": 43 }, { "epoch": 2.983050847457627, "grad_norm": 0.48384496569633484, "learning_rate": 8.400863688854597e-05, "loss": 1.2456, "num_input_tokens_seen": 436584, "step": 44 }, { "epoch": 3.0508474576271185, "grad_norm": 0.6171286702156067, "learning_rate": 8.33173288976002e-05, "loss": 1.479, "num_input_tokens_seen": 445072, "step": 45 }, { "epoch": 3.1186440677966103, "grad_norm": 0.5578075051307678, "learning_rate": 8.261437056390606e-05, "loss": 1.8757, "num_input_tokens_seen": 453696, "step": 46 }, { "epoch": 3.1864406779661016, "grad_norm": 0.5808205604553223, "learning_rate": 8.190000769665044e-05, "loss": 1.4613, "num_input_tokens_seen": 462976, "step": 47 }, { "epoch": 3.2542372881355934, "grad_norm": 0.46899017691612244, "learning_rate": 8.117449009293668e-05, "loss": 1.2657, "num_input_tokens_seen": 474016, "step": 48 }, { "epoch": 3.3220338983050848, "grad_norm": 0.677277147769928, "learning_rate": 8.043807145043604e-05, "loss": 1.7769, "num_input_tokens_seen": 482960, "step": 49 }, { "epoch": 3.389830508474576, "grad_norm": 0.543119490146637, "learning_rate": 7.969100927867507e-05, "loss": 1.3444, "num_input_tokens_seen": 493200, "step": 50 }, { "epoch": 3.457627118644068, "grad_norm": 0.5538585782051086, "learning_rate": 7.89335648089903e-05, "loss": 1.4113, "num_input_tokens_seen": 503168, "step": 51 }, { "epoch": 3.5254237288135593, "grad_norm": 0.6398975253105164, "learning_rate": 7.81660029031811e-05, "loss": 1.5596, "num_input_tokens_seen": 513776, "step": 52 }, { "epoch": 3.593220338983051, "grad_norm": 0.8313115835189819, "learning_rate": 7.738859196089358e-05, "loss": 1.7417, "num_input_tokens_seen": 521376, "step": 53 }, { "epoch": 3.6610169491525424, "grad_norm": 0.777786910533905, "learning_rate": 7.660160382576683e-05, "loss": 1.7439, "num_input_tokens_seen": 531504, "step": 54 }, { "epoch": 3.7288135593220337, "grad_norm": 0.7124835848808289, "learning_rate": 7.580531369037533e-05, "loss": 1.2392, "num_input_tokens_seen": 542832, "step": 55 }, { "epoch": 3.7966101694915255, "grad_norm": 0.7158543467521667, "learning_rate": 7.500000000000001e-05, "loss": 1.4463, "num_input_tokens_seen": 554128, "step": 56 }, { "epoch": 3.864406779661017, "grad_norm": 0.6232526898384094, "learning_rate": 7.4185944355262e-05, "loss": 1.1846, "num_input_tokens_seen": 564576, "step": 57 }, { "epoch": 3.9322033898305087, "grad_norm": 0.6178350448608398, "learning_rate": 7.33634314136531e-05, "loss": 1.1875, "num_input_tokens_seen": 574464, "step": 58 }, { "epoch": 4.0, "grad_norm": 0.720112144947052, "learning_rate": 7.253274878999727e-05, "loss": 1.1346, "num_input_tokens_seen": 584192, "step": 59 }, { "epoch": 4.067796610169491, "grad_norm": 0.8956817388534546, "learning_rate": 7.169418695587791e-05, "loss": 1.6462, "num_input_tokens_seen": 593440, "step": 60 }, { "epoch": 4.135593220338983, "grad_norm": 0.9503009915351868, "learning_rate": 7.084803913806641e-05, "loss": 1.0892, "num_input_tokens_seen": 603168, "step": 61 }, { "epoch": 4.203389830508475, "grad_norm": 0.843825101852417, "learning_rate": 6.999460121598704e-05, "loss": 1.4054, "num_input_tokens_seen": 611888, "step": 62 }, { "epoch": 4.271186440677966, "grad_norm": 0.7209855318069458, "learning_rate": 6.91341716182545e-05, "loss": 0.9302, 
"num_input_tokens_seen": 623312, "step": 63 }, { "epoch": 4.338983050847458, "grad_norm": 0.884834885597229, "learning_rate": 6.826705121831976e-05, "loss": 1.2218, "num_input_tokens_seen": 634768, "step": 64 }, { "epoch": 4.406779661016949, "grad_norm": 1.0309629440307617, "learning_rate": 6.739354322926136e-05, "loss": 1.1605, "num_input_tokens_seen": 644848, "step": 65 }, { "epoch": 4.47457627118644, "grad_norm": 1.097214698791504, "learning_rate": 6.651395309775837e-05, "loss": 1.7099, "num_input_tokens_seen": 654672, "step": 66 }, { "epoch": 4.5423728813559325, "grad_norm": 0.7945898175239563, "learning_rate": 6.562858839728223e-05, "loss": 0.8147, "num_input_tokens_seen": 665584, "step": 67 }, { "epoch": 4.610169491525424, "grad_norm": 1.2936004400253296, "learning_rate": 6.473775872054521e-05, "loss": 1.5787, "num_input_tokens_seen": 674096, "step": 68 }, { "epoch": 4.677966101694915, "grad_norm": 1.0135834217071533, "learning_rate": 6.384177557124247e-05, "loss": 1.2265, "num_input_tokens_seen": 683968, "step": 69 }, { "epoch": 4.745762711864407, "grad_norm": 0.9531109929084778, "learning_rate": 6.294095225512603e-05, "loss": 1.2753, "num_input_tokens_seen": 694368, "step": 70 } ], "logging_steps": 1, "max_steps": 168, "num_input_tokens_seen": 694368, "num_train_epochs": 12, "save_steps": 10, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 3.1354448922279936e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }