{ "best_metric": 0.5124370455741882, "best_model_checkpoint": "/home/ankur/projects/llm_test/Akshay/fine_tuning/llama_3_snippets_v3/checkpoint-2574", "epoch": 3.9992231501262383, "eval_steps": 500, "global_step": 3432, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.9998057875315596, "grad_norm": 0.1708984375, "learning_rate": 0.00015000000000000001, "loss": 0.6301, "step": 858 }, { "epoch": 0.9998057875315596, "eval_loss": 0.6022469997406006, "eval_runtime": 796.222, "eval_samples_per_second": 3.219, "eval_steps_per_second": 1.61, "step": 858 }, { "epoch": 1.9996115750631192, "grad_norm": 0.23828125, "learning_rate": 0.0001, "loss": 0.5104, "step": 1716 }, { "epoch": 1.9996115750631192, "eval_loss": 0.5411043763160706, "eval_runtime": 805.4973, "eval_samples_per_second": 3.182, "eval_steps_per_second": 1.592, "step": 1716 }, { "epoch": 2.9994173625946785, "grad_norm": 0.3359375, "learning_rate": 5e-05, "loss": 0.3799, "step": 2574 }, { "epoch": 2.9994173625946785, "eval_loss": 0.5124370455741882, "eval_runtime": 806.6908, "eval_samples_per_second": 3.177, "eval_steps_per_second": 1.589, "step": 2574 }, { "epoch": 3.9992231501262383, "grad_norm": 0.40234375, "learning_rate": 0.0, "loss": 0.2803, "step": 3432 }, { "epoch": 3.9992231501262383, "eval_loss": 0.5235692858695984, "eval_runtime": 815.8051, "eval_samples_per_second": 3.142, "eval_steps_per_second": 1.571, "step": 3432 } ], "logging_steps": 500, "max_steps": 3432, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.0617164129518879e+18, "train_batch_size": 2, "trial_name": null, "trial_params": null }