{
  "best_metric": 0.025097770616412163,
  "best_model_checkpoint": "./results_pnum/results_cvrev_pnum_f4_large_b4e15_5000/checkpoint-10000",
  "epoch": 10.26694045174538,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.26,
      "learning_rate": 4.914946777561009e-05,
      "loss": 0.5832,
      "step": 500
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.829380155388986e-05,
      "loss": 0.0339,
      "step": 1000
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.743813533216963e-05,
      "loss": 0.0314,
      "step": 1500
    },
    {
      "epoch": 1.03,
      "learning_rate": 4.65824691104494e-05,
      "loss": 0.0301,
      "step": 2000
    },
    {
      "epoch": 1.28,
      "learning_rate": 4.572680288872916e-05,
      "loss": 0.0292,
      "step": 2500
    },
    {
      "epoch": 1.54,
      "learning_rate": 4.487113666700893e-05,
      "loss": 0.029,
      "step": 3000
    },
    {
      "epoch": 1.8,
      "learning_rate": 4.40154704452887e-05,
      "loss": 0.0278,
      "step": 3500
    },
    {
      "epoch": 2.05,
      "learning_rate": 4.3159804223568475e-05,
      "loss": 0.0273,
      "step": 4000
    },
    {
      "epoch": 2.31,
      "learning_rate": 4.2304138001848244e-05,
      "loss": 0.0261,
      "step": 4500
    },
    {
      "epoch": 2.57,
      "learning_rate": 4.144847178012801e-05,
      "loss": 0.0266,
      "step": 5000
    },
    {
      "epoch": 2.57,
      "eval_loss": 0.025240449234843254,
      "eval_runtime": 181.3134,
      "eval_samples_per_second": 14.307,
      "eval_steps_per_second": 3.579,
      "step": 5000
    },
    {
      "epoch": 2.82,
      "learning_rate": 4.0592805558407774e-05,
      "loss": 0.026,
      "step": 5500
    },
    {
      "epoch": 3.08,
      "learning_rate": 3.973713933668754e-05,
      "loss": 0.0265,
      "step": 6000
    },
    {
      "epoch": 3.34,
      "learning_rate": 3.888147311496732e-05,
      "loss": 0.0248,
      "step": 6500
    },
    {
      "epoch": 3.59,
      "learning_rate": 3.8025806893247086e-05,
      "loss": 0.0247,
      "step": 7000
    },
    {
      "epoch": 3.85,
      "learning_rate": 3.7170140671526854e-05,
      "loss": 0.0245,
      "step": 7500
    },
    {
      "epoch": 4.11,
      "learning_rate": 3.631447444980662e-05,
      "loss": 0.0241,
      "step": 8000
    },
    {
      "epoch": 4.36,
      "learning_rate": 3.5458808228086384e-05,
      "loss": 0.0226,
      "step": 8500
    },
    {
      "epoch": 4.62,
      "learning_rate": 3.460314200636616e-05,
      "loss": 0.0232,
      "step": 9000
    },
    {
      "epoch": 4.88,
      "learning_rate": 3.374747578464593e-05,
      "loss": 0.0232,
      "step": 9500
    },
    {
      "epoch": 5.13,
      "learning_rate": 3.2891809562925697e-05,
      "loss": 0.0215,
      "step": 10000
    },
    {
      "epoch": 5.13,
      "eval_loss": 0.025097770616412163,
      "eval_runtime": 120.6593,
      "eval_samples_per_second": 21.499,
      "eval_steps_per_second": 5.379,
      "step": 10000
    },
    {
      "epoch": 5.39,
      "learning_rate": 3.2036143341205465e-05,
      "loss": 0.0211,
      "step": 10500
    },
    {
      "epoch": 5.65,
      "learning_rate": 3.1180477119485233e-05,
      "loss": 0.0211,
      "step": 11000
    },
    {
      "epoch": 5.9,
      "learning_rate": 3.0324810897765e-05,
      "loss": 0.0216,
      "step": 11500
    },
    {
      "epoch": 6.16,
      "learning_rate": 2.946914467604477e-05,
      "loss": 0.0201,
      "step": 12000
    },
    {
      "epoch": 6.42,
      "learning_rate": 2.861347845432454e-05,
      "loss": 0.0197,
      "step": 12500
    },
    {
      "epoch": 6.67,
      "learning_rate": 2.7757812232604307e-05,
      "loss": 0.0196,
      "step": 13000
    },
    {
      "epoch": 6.93,
      "learning_rate": 2.690214601088408e-05,
      "loss": 0.0186,
      "step": 13500
    },
    {
      "epoch": 7.19,
      "learning_rate": 2.6046479789163847e-05,
      "loss": 0.0179,
      "step": 14000
    },
    {
      "epoch": 7.44,
      "learning_rate": 2.5190813567443612e-05,
      "loss": 0.0178,
      "step": 14500
    },
    {
      "epoch": 7.7,
      "learning_rate": 2.433514734572338e-05,
      "loss": 0.0178,
      "step": 15000
    },
    {
      "epoch": 7.7,
      "eval_loss": 0.02974303811788559,
      "eval_runtime": 120.9977,
      "eval_samples_per_second": 21.438,
      "eval_steps_per_second": 5.364,
      "step": 15000
    },
    {
      "epoch": 7.96,
      "learning_rate": 2.347948112400315e-05,
      "loss": 0.0174,
      "step": 15500
    },
    {
      "epoch": 8.21,
      "learning_rate": 2.2623814902282918e-05,
      "loss": 0.0159,
      "step": 16000
    },
    {
      "epoch": 8.47,
      "learning_rate": 2.1768148680562686e-05,
      "loss": 0.016,
      "step": 16500
    },
    {
      "epoch": 8.73,
      "learning_rate": 2.0912482458842455e-05,
      "loss": 0.0158,
      "step": 17000
    },
    {
      "epoch": 8.98,
      "learning_rate": 2.0056816237122226e-05,
      "loss": 0.0163,
      "step": 17500
    },
    {
      "epoch": 9.24,
      "learning_rate": 1.920115001540199e-05,
      "loss": 0.014,
      "step": 18000
    },
    {
      "epoch": 9.5,
      "learning_rate": 1.834548379368176e-05,
      "loss": 0.0143,
      "step": 18500
    },
    {
      "epoch": 9.75,
      "learning_rate": 1.748981757196153e-05,
      "loss": 0.0149,
      "step": 19000
    },
    {
      "epoch": 10.01,
      "learning_rate": 1.6634151350241297e-05,
      "loss": 0.0142,
      "step": 19500
    },
    {
      "epoch": 10.27,
      "learning_rate": 1.577848512852107e-05,
      "loss": 0.0128,
      "step": 20000
    },
    {
      "epoch": 10.27,
      "eval_loss": 0.03699657693505287,
      "eval_runtime": 120.8215,
      "eval_samples_per_second": 21.47,
      "eval_steps_per_second": 5.372,
      "step": 20000
    }
  ],
  "max_steps": 29220,
  "num_train_epochs": 15,
  "total_flos": 1.7313883226112e+17,
  "trial_name": null,
  "trial_params": null
}