{
  "best_metric": 0.18186990916728973,
  "best_model_checkpoint": "face_obstruction_image_detection/checkpoint-11556",
  "epoch": 9.0,
  "eval_steps": 500,
  "global_step": 11556,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.39,
      "learning_rate": 9.648162627052385e-06,
      "loss": 0.8123,
      "step": 500
    },
    {
      "epoch": 0.78,
      "learning_rate": 9.257232212666147e-06,
      "loss": 0.3519,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8846678355737385,
      "eval_loss": 0.2906096577644348,
      "eval_runtime": 140.8808,
      "eval_samples_per_second": 72.87,
      "eval_steps_per_second": 9.114,
      "step": 1284
    },
    {
      "epoch": 1.17,
      "learning_rate": 8.866301798279907e-06,
      "loss": 0.2891,
      "step": 1500
    },
    {
      "epoch": 1.56,
      "learning_rate": 8.475371383893667e-06,
      "loss": 0.2514,
      "step": 2000
    },
    {
      "epoch": 1.95,
      "learning_rate": 8.08444096950743e-06,
      "loss": 0.2335,
      "step": 2500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8915838690824079,
      "eval_loss": 0.2299681454896927,
      "eval_runtime": 140.059,
      "eval_samples_per_second": 73.298,
      "eval_steps_per_second": 9.168,
      "step": 2568
    },
    {
      "epoch": 2.34,
      "learning_rate": 7.693510555121188e-06,
      "loss": 0.2103,
      "step": 3000
    },
    {
      "epoch": 2.73,
      "learning_rate": 7.30258014073495e-06,
      "loss": 0.2044,
      "step": 3500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.891291642314436,
      "eval_loss": 0.2096417248249054,
      "eval_runtime": 141.1058,
      "eval_samples_per_second": 72.754,
      "eval_steps_per_second": 9.1,
      "step": 3852
    },
    {
      "epoch": 3.12,
      "learning_rate": 6.91164972634871e-06,
      "loss": 0.1924,
      "step": 4000
    },
    {
      "epoch": 3.5,
      "learning_rate": 6.520719311962471e-06,
      "loss": 0.1837,
      "step": 4500
    },
    {
      "epoch": 3.89,
      "learning_rate": 6.129788897576232e-06,
      "loss": 0.1807,
      "step": 5000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8915838690824079,
      "eval_loss": 0.19587308168411255,
      "eval_runtime": 137.5403,
      "eval_samples_per_second": 74.64,
      "eval_steps_per_second": 9.335,
      "step": 5136
    },
    {
      "epoch": 4.28,
      "learning_rate": 5.7388584831899925e-06,
      "loss": 0.1679,
      "step": 5500
    },
    {
      "epoch": 4.67,
      "learning_rate": 5.347928068803754e-06,
      "loss": 0.1686,
      "step": 6000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8925579583089811,
      "eval_loss": 0.18770596385002136,
      "eval_runtime": 139.4839,
      "eval_samples_per_second": 73.6,
      "eval_steps_per_second": 9.205,
      "step": 6420
    },
    {
      "epoch": 5.06,
      "learning_rate": 4.956997654417514e-06,
      "loss": 0.1696,
      "step": 6500
    },
    {
      "epoch": 5.45,
      "learning_rate": 4.566067240031275e-06,
      "loss": 0.165,
      "step": 7000
    },
    {
      "epoch": 5.84,
      "learning_rate": 4.175136825645035e-06,
      "loss": 0.1566,
      "step": 7500
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8901227352425483,
      "eval_loss": 0.1865508109331131,
      "eval_runtime": 140.3224,
      "eval_samples_per_second": 73.16,
      "eval_steps_per_second": 9.15,
      "step": 7704
    },
    {
      "epoch": 6.23,
      "learning_rate": 3.7842064112587963e-06,
      "loss": 0.1586,
      "step": 8000
    },
    {
      "epoch": 6.62,
      "learning_rate": 3.393275996872557e-06,
      "loss": 0.1556,
      "step": 8500
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8917786869277226,
      "eval_loss": 0.18248248100280762,
      "eval_runtime": 141.8634,
      "eval_samples_per_second": 72.365,
      "eval_steps_per_second": 9.051,
      "step": 8988
    },
    {
      "epoch": 7.01,
      "learning_rate": 3.002345582486318e-06,
      "loss": 0.1591,
      "step": 9000
    },
    {
      "epoch": 7.4,
      "learning_rate": 2.6114151681000783e-06,
      "loss": 0.1544,
      "step": 9500
    },
    {
      "epoch": 7.79,
      "learning_rate": 2.220484753713839e-06,
      "loss": 0.1491,
      "step": 10000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.893142411844925,
      "eval_loss": 0.18403713405132294,
      "eval_runtime": 143.6244,
      "eval_samples_per_second": 71.478,
      "eval_steps_per_second": 8.94,
      "step": 10272
    },
    {
      "epoch": 8.18,
      "learning_rate": 1.8295543393276e-06,
      "loss": 0.1522,
      "step": 10500
    },
    {
      "epoch": 8.57,
      "learning_rate": 1.4386239249413605e-06,
      "loss": 0.1468,
      "step": 11000
    },
    {
      "epoch": 8.96,
      "learning_rate": 1.0476935105551214e-06,
      "loss": 0.1477,
      "step": 11500
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8919735047730372,
      "eval_loss": 0.18186990916728973,
      "eval_runtime": 143.3138,
      "eval_samples_per_second": 71.633,
      "eval_steps_per_second": 8.959,
      "step": 11556
    }
  ],
  "logging_steps": 500,
  "max_steps": 12840,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 2.864020628943323e+19,
  "trial_name": null,
  "trial_params": null
}