{
"best_metric": 0.9080459770114943,
"best_model_checkpoint": "dinov2-small-imagenet1k-1-layer-finetuned-noh\\checkpoint-46",
"epoch": 9.577777777777778,
"eval_steps": 500,
"global_step": 220,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4444444444444444,
"grad_norm": 467.71881103515625,
"learning_rate": 2.272727272727273e-05,
"loss": 0.7354,
"step": 10
},
{
"epoch": 0.8888888888888888,
"grad_norm": 400.1885986328125,
"learning_rate": 4.545454545454546e-05,
"loss": 0.6331,
"step": 20
},
{
"epoch": 1.0,
"eval_accuracy": 0.7651888341543513,
"eval_loss": 0.5416210889816284,
"eval_runtime": 69.3532,
"eval_samples_per_second": 8.781,
"eval_steps_per_second": 0.562,
"step": 23
},
{
"epoch": 1.3111111111111111,
"grad_norm": 38.50715255737305,
"learning_rate": 4.797979797979798e-05,
"loss": 0.5552,
"step": 30
},
{
"epoch": 1.7555555555555555,
"grad_norm": 288.6549072265625,
"learning_rate": 4.545454545454546e-05,
"loss": 0.4913,
"step": 40
},
{
"epoch": 2.0,
"eval_accuracy": 0.9080459770114943,
"eval_loss": 0.37552881240844727,
"eval_runtime": 62.5531,
"eval_samples_per_second": 9.736,
"eval_steps_per_second": 0.623,
"step": 46
},
{
"epoch": 2.1777777777777776,
"grad_norm": 73.34674835205078,
"learning_rate": 4.292929292929293e-05,
"loss": 0.4369,
"step": 50
},
{
"epoch": 2.6222222222222222,
"grad_norm": 61.16302490234375,
"learning_rate": 4.0404040404040405e-05,
"loss": 0.4642,
"step": 60
},
{
"epoch": 3.0,
"eval_accuracy": 0.6141215106732348,
"eval_loss": 0.7140535116195679,
"eval_runtime": 68.2847,
"eval_samples_per_second": 8.919,
"eval_steps_per_second": 0.571,
"step": 69
},
{
"epoch": 3.0444444444444443,
"grad_norm": 245.7981414794922,
"learning_rate": 3.787878787878788e-05,
"loss": 0.4971,
"step": 70
},
{
"epoch": 3.488888888888889,
"grad_norm": 59.85107421875,
"learning_rate": 3.535353535353535e-05,
"loss": 0.4248,
"step": 80
},
{
"epoch": 3.9333333333333336,
"grad_norm": 26.801841735839844,
"learning_rate": 3.282828282828283e-05,
"loss": 0.4451,
"step": 90
},
{
"epoch": 4.0,
"eval_accuracy": 0.8045977011494253,
"eval_loss": 0.4347776770591736,
"eval_runtime": 63.7065,
"eval_samples_per_second": 9.559,
"eval_steps_per_second": 0.612,
"step": 92
},
{
"epoch": 4.355555555555555,
"grad_norm": 125.90353393554688,
"learning_rate": 3.0303030303030306e-05,
"loss": 0.3966,
"step": 100
},
{
"epoch": 4.8,
"grad_norm": 27.96786880493164,
"learning_rate": 2.777777777777778e-05,
"loss": 0.4095,
"step": 110
},
{
"epoch": 5.0,
"eval_accuracy": 0.8029556650246306,
"eval_loss": 0.5060437917709351,
"eval_runtime": 66.7998,
"eval_samples_per_second": 9.117,
"eval_steps_per_second": 0.584,
"step": 115
},
{
"epoch": 5.222222222222222,
"grad_norm": 28.08486557006836,
"learning_rate": 2.5252525252525256e-05,
"loss": 0.3595,
"step": 120
},
{
"epoch": 5.666666666666667,
"grad_norm": 44.42402267456055,
"learning_rate": 2.272727272727273e-05,
"loss": 0.3399,
"step": 130
},
{
"epoch": 6.0,
"eval_accuracy": 0.7372742200328407,
"eval_loss": 0.546423614025116,
"eval_runtime": 63.3896,
"eval_samples_per_second": 9.607,
"eval_steps_per_second": 0.615,
"step": 138
},
{
"epoch": 6.088888888888889,
"grad_norm": 93.54395294189453,
"learning_rate": 2.0202020202020203e-05,
"loss": 0.333,
"step": 140
},
{
"epoch": 6.533333333333333,
"grad_norm": 56.091121673583984,
"learning_rate": 1.7676767676767676e-05,
"loss": 0.434,
"step": 150
},
{
"epoch": 6.977777777777778,
"grad_norm": 24.830839157104492,
"learning_rate": 1.5151515151515153e-05,
"loss": 0.3304,
"step": 160
},
{
"epoch": 7.0,
"eval_accuracy": 0.8883415435139573,
"eval_loss": 0.32744449377059937,
"eval_runtime": 63.7561,
"eval_samples_per_second": 9.552,
"eval_steps_per_second": 0.612,
"step": 161
},
{
"epoch": 7.4,
"grad_norm": 59.196083068847656,
"learning_rate": 1.2626262626262628e-05,
"loss": 0.2963,
"step": 170
},
{
"epoch": 7.844444444444444,
"grad_norm": 22.917722702026367,
"learning_rate": 1.0101010101010101e-05,
"loss": 0.3539,
"step": 180
},
{
"epoch": 8.0,
"eval_accuracy": 0.8604269293924466,
"eval_loss": 0.38926875591278076,
"eval_runtime": 63.7316,
"eval_samples_per_second": 9.556,
"eval_steps_per_second": 0.612,
"step": 184
},
{
"epoch": 8.266666666666667,
"grad_norm": 87.52684783935547,
"learning_rate": 7.5757575757575764e-06,
"loss": 0.2673,
"step": 190
},
{
"epoch": 8.71111111111111,
"grad_norm": 21.081283569335938,
"learning_rate": 5.050505050505051e-06,
"loss": 0.2849,
"step": 200
},
{
"epoch": 9.0,
"eval_accuracy": 0.8637110016420362,
"eval_loss": 0.3758099675178528,
"eval_runtime": 62.4924,
"eval_samples_per_second": 9.745,
"eval_steps_per_second": 0.624,
"step": 207
},
{
"epoch": 9.133333333333333,
"grad_norm": 63.325809478759766,
"learning_rate": 2.5252525252525253e-06,
"loss": 0.265,
"step": 210
},
{
"epoch": 9.577777777777778,
"grad_norm": 38.6427116394043,
"learning_rate": 0.0,
"loss": 0.2605,
"step": 220
},
{
"epoch": 9.577777777777778,
"eval_accuracy": 0.8489326765188834,
"eval_loss": 0.3968973755836487,
"eval_runtime": 63.9436,
"eval_samples_per_second": 9.524,
"eval_steps_per_second": 0.61,
"step": 220
},
{
"epoch": 9.577777777777778,
"step": 220,
"total_flos": 2.7458744422511e+17,
"train_loss": 0.4097213279117237,
"train_runtime": 2363.2087,
"train_samples_per_second": 6.089,
"train_steps_per_second": 0.093
}
],
"logging_steps": 10,
"max_steps": 220,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.7458744422511e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}