{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "global_step": 61360,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "learning_rate": 2.877770534550196e-05,
      "loss": 0.5826,
      "step": 2500
    },
    {
      "epoch": 0.41,
      "learning_rate": 2.7555410691003913e-05,
      "loss": 0.4701,
      "step": 5000
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.6333116036505867e-05,
      "loss": 0.4449,
      "step": 7500
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.511082138200782e-05,
      "loss": 0.4274,
      "step": 10000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8523688232297504,
      "eval_loss": 0.38915878534317017,
      "eval_runtime": 26.5235,
      "eval_samples_per_second": 370.049,
      "eval_steps_per_second": 11.575,
      "step": 12272
    },
    {
      "epoch": 1.02,
      "learning_rate": 2.388852672750978e-05,
      "loss": 0.4051,
      "step": 12500
    },
    {
      "epoch": 1.22,
      "learning_rate": 2.2666232073011734e-05,
      "loss": 0.2711,
      "step": 15000
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.144393741851369e-05,
      "loss": 0.2755,
      "step": 17500
    },
    {
      "epoch": 1.63,
      "learning_rate": 2.0221642764015646e-05,
      "loss": 0.2808,
      "step": 20000
    },
    {
      "epoch": 1.83,
      "learning_rate": 1.89993481095176e-05,
      "loss": 0.2844,
      "step": 22500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8565461029037188,
      "eval_loss": 0.4079381227493286,
      "eval_runtime": 26.505,
      "eval_samples_per_second": 370.308,
      "eval_steps_per_second": 11.583,
      "step": 24544
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.7777053455019558e-05,
      "loss": 0.2537,
      "step": 25000
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.6554758800521512e-05,
      "loss": 0.1468,
      "step": 27500
    },
    {
      "epoch": 2.44,
      "learning_rate": 1.533246414602347e-05,
      "loss": 0.1536,
      "step": 30000
    },
    {
      "epoch": 2.65,
      "learning_rate": 1.4110169491525424e-05,
      "loss": 0.1574,
      "step": 32500
    },
    {
      "epoch": 2.85,
      "learning_rate": 1.288787483702738e-05,
      "loss": 0.1589,
      "step": 35000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8526744778400408,
      "eval_loss": 0.503338098526001,
      "eval_runtime": 26.5083,
      "eval_samples_per_second": 370.261,
      "eval_steps_per_second": 11.581,
      "step": 36816
    },
    {
      "epoch": 3.06,
      "learning_rate": 1.1665580182529336e-05,
      "loss": 0.14,
      "step": 37500
    },
    {
      "epoch": 3.26,
      "learning_rate": 1.044328552803129e-05,
      "loss": 0.0868,
      "step": 40000
    },
    {
      "epoch": 3.46,
      "learning_rate": 9.220990873533247e-06,
      "loss": 0.0901,
      "step": 42500
    },
    {
      "epoch": 3.67,
      "learning_rate": 7.998696219035203e-06,
      "loss": 0.0904,
      "step": 45000
    },
    {
      "epoch": 3.87,
      "learning_rate": 6.776401564537158e-06,
      "loss": 0.0877,
      "step": 47500
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8575649516046867,
      "eval_loss": 0.6623563170433044,
      "eval_runtime": 26.5163,
      "eval_samples_per_second": 370.149,
      "eval_steps_per_second": 11.578,
      "step": 49088
    },
    {
      "epoch": 4.07,
      "learning_rate": 5.554106910039114e-06,
      "loss": 0.0698,
      "step": 50000
    },
    {
      "epoch": 4.28,
      "learning_rate": 4.331812255541069e-06,
      "loss": 0.0468,
      "step": 52500
    },
    {
      "epoch": 4.48,
      "learning_rate": 3.109517601043025e-06,
      "loss": 0.0494,
      "step": 55000
    },
    {
      "epoch": 4.69,
      "learning_rate": 1.8872229465449805e-06,
      "loss": 0.045,
      "step": 57500
    },
    {
      "epoch": 4.89,
      "learning_rate": 6.649282920469362e-07,
      "loss": 0.0426,
      "step": 60000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8633723892002038,
      "eval_loss": 0.8447091579437256,
      "eval_runtime": 26.5083,
      "eval_samples_per_second": 370.261,
      "eval_steps_per_second": 11.581,
      "step": 61360
    },
    {
      "epoch": 5.0,
      "step": 61360,
      "total_flos": 1.4676274702232822e+17,
      "train_loss": 0.2071122335486182,
      "train_runtime": 11012.4801,
      "train_samples_per_second": 178.299,
      "train_steps_per_second": 5.572
    }
  ],
  "max_steps": 61360,
  "num_train_epochs": 5,
  "total_flos": 1.4676274702232822e+17,
  "trial_name": null,
  "trial_params": null
}