{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 330,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06060606060606061,
"grad_norm": 0.39797502756118774,
"learning_rate": 1.5151515151515152e-06,
"loss": 1.3973,
"step": 20
},
{
"epoch": 0.12121212121212122,
"grad_norm": 0.3954905867576599,
"learning_rate": 3.0303030303030305e-06,
"loss": 1.3793,
"step": 40
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.4346621334552765,
"learning_rate": 4.5454545454545455e-06,
"loss": 1.3916,
"step": 60
},
{
"epoch": 0.24242424242424243,
"grad_norm": 0.3516716957092285,
"learning_rate": 4.965385884295467e-06,
"loss": 1.3072,
"step": 80
},
{
"epoch": 0.30303030303030304,
"grad_norm": 0.39283487200737,
"learning_rate": 4.798150758954164e-06,
"loss": 1.2461,
"step": 100
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.4054275453090668,
"learning_rate": 4.501353102310901e-06,
"loss": 1.1411,
"step": 120
},
{
"epoch": 0.42424242424242425,
"grad_norm": 0.5167345404624939,
"learning_rate": 4.091725435297721e-06,
"loss": 1.0952,
"step": 140
},
{
"epoch": 0.48484848484848486,
"grad_norm": 0.538719654083252,
"learning_rate": 3.5923612809233987e-06,
"loss": 0.9403,
"step": 160
},
{
"epoch": 0.5454545454545454,
"grad_norm": 0.44608861207962036,
"learning_rate": 3.0314132238824416e-06,
"loss": 0.8069,
"step": 180
},
{
"epoch": 0.6060606060606061,
"grad_norm": 0.4124351143836975,
"learning_rate": 2.440505756134732e-06,
"loss": 0.7429,
"step": 200
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.49403712153434753,
"learning_rate": 1.852952387243698e-06,
"loss": 0.7128,
"step": 220
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.45238739252090454,
"learning_rate": 1.301877533199859e-06,
"loss": 0.6752,
"step": 240
},
{
"epoch": 0.7878787878787878,
"grad_norm": 0.3747016191482544,
"learning_rate": 8.183490657468687e-07,
"loss": 0.679,
"step": 260
},
{
"epoch": 0.8484848484848485,
"grad_norm": 0.3603517413139343,
"learning_rate": 4.2962680322157335e-07,
"loss": 0.6836,
"step": 280
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.4947265386581421,
"learning_rate": 1.5762568750059604e-07,
"loss": 0.691,
"step": 300
},
{
"epoch": 0.9696969696969697,
"grad_norm": 0.42248502373695374,
"learning_rate": 1.768028831677926e-08,
"loss": 0.6585,
"step": 320
},
{
"epoch": 1.0,
"step": 330,
"total_flos": 1.519725494403072e+16,
"train_loss": 0.9634178855202414,
"train_runtime": 433.8299,
"train_samples_per_second": 3.043,
"train_steps_per_second": 0.761
}
],
"logging_steps": 20,
"max_steps": 330,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.519725494403072e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}