{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.022800684020520615, "eval_steps": 5, "global_step": 15, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0015200456013680411, "grad_norm": 4.591590404510498, "learning_rate": 1e-05, "loss": 177.75, "step": 1 }, { "epoch": 0.0015200456013680411, "eval_loss": 11.114282608032227, "eval_runtime": 19.3598, "eval_samples_per_second": 57.232, "eval_steps_per_second": 28.616, "step": 1 }, { "epoch": 0.0030400912027360822, "grad_norm": 4.531345367431641, "learning_rate": 2e-05, "loss": 177.8125, "step": 2 }, { "epoch": 0.004560136804104123, "grad_norm": 4.347599983215332, "learning_rate": 3e-05, "loss": 177.5625, "step": 3 }, { "epoch": 0.0060801824054721645, "grad_norm": 4.609200477600098, "learning_rate": 4e-05, "loss": 177.75, "step": 4 }, { "epoch": 0.007600228006840205, "grad_norm": 4.364078521728516, "learning_rate": 5e-05, "loss": 177.875, "step": 5 }, { "epoch": 0.007600228006840205, "eval_loss": 11.11292839050293, "eval_runtime": 1.42, "eval_samples_per_second": 780.292, "eval_steps_per_second": 390.146, "step": 5 }, { "epoch": 0.009120273608208246, "grad_norm": 4.674941539764404, "learning_rate": 6e-05, "loss": 177.6875, "step": 6 }, { "epoch": 0.010640319209576287, "grad_norm": 4.507266998291016, "learning_rate": 7e-05, "loss": 177.8125, "step": 7 }, { "epoch": 0.012160364810944329, "grad_norm": 4.671020030975342, "learning_rate": 8e-05, "loss": 178.0, "step": 8 }, { "epoch": 0.013680410412312369, "grad_norm": 4.70671272277832, "learning_rate": 9e-05, "loss": 177.875, "step": 9 }, { "epoch": 0.01520045601368041, "grad_norm": 4.528318405151367, "learning_rate": 0.0001, "loss": 177.8125, "step": 10 }, { "epoch": 0.01520045601368041, "eval_loss": 11.104805946350098, "eval_runtime": 1.4199, "eval_samples_per_second": 780.319, "eval_steps_per_second": 390.159, "step": 10 }, { "epoch": 0.016720501615048452, "grad_norm": 5.062138557434082, "learning_rate": 9.755282581475769e-05, "loss": 177.625, "step": 11 }, { "epoch": 0.01824054721641649, "grad_norm": 4.429068565368652, "learning_rate": 9.045084971874738e-05, "loss": 177.75, "step": 12 }, { "epoch": 0.019760592817784535, "grad_norm": 4.724205017089844, "learning_rate": 7.938926261462366e-05, "loss": 177.5, "step": 13 }, { "epoch": 0.021280638419152575, "grad_norm": 5.304701328277588, "learning_rate": 6.545084971874738e-05, "loss": 177.4375, "step": 14 }, { "epoch": 0.022800684020520615, "grad_norm": 5.0064005851745605, "learning_rate": 5e-05, "loss": 177.25, "step": 15 }, { "epoch": 0.022800684020520615, "eval_loss": 11.090591430664062, "eval_runtime": 1.464, "eval_samples_per_second": 756.813, "eval_steps_per_second": 378.406, "step": 15 } ], "logging_steps": 1, "max_steps": 20, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 583510523904.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }