{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.41450777202072536, "eval_steps": 500, "global_step": 200, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.02, "grad_norm": 4.99941873550415, "learning_rate": 2e-05, "loss": 9.9329, "step": 10 }, { "epoch": 0.04, "grad_norm": 1.741065502166748, "learning_rate": 4e-05, "loss": 11.0746, "step": 20 }, { "epoch": 0.06, "grad_norm": 1.4727320671081543, "learning_rate": 6e-05, "loss": 2.7159, "step": 30 }, { "epoch": 0.08, "grad_norm": 0.1335960477590561, "learning_rate": 8e-05, "loss": 0.3969, "step": 40 }, { "epoch": 0.1, "grad_norm": 0.0014472692273557186, "learning_rate": 0.0001, "loss": 0.0032, "step": 50 }, { "epoch": 0.12, "grad_norm": 0.0010780546581372619, "learning_rate": 0.0001, "loss": 0.0002, "step": 60 }, { "epoch": 0.15, "grad_norm": 1.03132963180542, "learning_rate": 0.0001, "loss": 0.0002, "step": 70 }, { "epoch": 0.17, "grad_norm": 0.008827299810945988, "learning_rate": 0.0001, "loss": 0.0, "step": 80 }, { "epoch": 0.19, "grad_norm": 0.0002956670359708369, "learning_rate": 0.0001, "loss": 0.0001, "step": 90 }, { "epoch": 0.21, "grad_norm": 0.0003419867134653032, "learning_rate": 0.0001, "loss": 0.0, "step": 100 }, { "epoch": 0.23, "grad_norm": 0.0003681881644297391, "learning_rate": 0.0001, "loss": 0.0, "step": 110 }, { "epoch": 0.25, "grad_norm": 0.0002884200366679579, "learning_rate": 0.0001, "loss": 0.0, "step": 120 }, { "epoch": 0.27, "grad_norm": 0.00011985149467363954, "learning_rate": 0.0001, "loss": 0.0, "step": 130 }, { "epoch": 0.29, "grad_norm": 0.0003195986500941217, "learning_rate": 0.0001, "loss": 0.0, "step": 140 }, { "epoch": 0.31, "grad_norm": 0.00010149635636480525, "learning_rate": 0.0001, "loss": 0.0, "step": 150 }, { "epoch": 0.33, "grad_norm": 0.00010508792183827609, "learning_rate": 0.0001, "loss": 0.0, "step": 160 }, { "epoch": 0.35, "grad_norm": 0.00011793687008321285, "learning_rate": 0.0001, "loss": 0.006, "step": 170 }, { "epoch": 0.37, "grad_norm": 8.076676749624312e-05, "learning_rate": 0.0001, "loss": 0.0, "step": 180 }, { "epoch": 0.39, "grad_norm": 0.0007808339432813227, "learning_rate": 0.0001, "loss": 0.006, "step": 190 }, { "epoch": 0.41, "grad_norm": 0.11711683869361877, "learning_rate": 0.0001, "loss": 0.003, "step": 200 } ], "logging_steps": 10, "max_steps": 482, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 100, "total_flos": 3.335841878562816e+17, "train_batch_size": 1, "trial_name": null, "trial_params": null }