{
  "best_metric": 0.8981937602627258,
  "best_model_checkpoint": "dinov2-small-imagenet1k-1-layer-finetuned-noh\\checkpoint-69",
  "epoch": 9.577777777777778,
  "eval_steps": 500,
  "global_step": 220,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 159.195556640625,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.6375,
      "step": 10
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 104.19737243652344,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.5315,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.24794745484400657,
      "eval_loss": 1.2674404382705688,
      "eval_runtime": 77.3353,
      "eval_samples_per_second": 7.875,
      "eval_steps_per_second": 0.504,
      "step": 23
    },
    {
      "epoch": 1.3111111111111111,
      "grad_norm": 71.8785400390625,
      "learning_rate": 4.797979797979798e-05,
      "loss": 0.6264,
      "step": 30
    },
    {
      "epoch": 1.7555555555555555,
      "grad_norm": 95.69356536865234,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.4629,
      "step": 40
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7881773399014779,
      "eval_loss": 0.5133813619613647,
      "eval_runtime": 70.8468,
      "eval_samples_per_second": 8.596,
      "eval_steps_per_second": 0.55,
      "step": 46
    },
    {
      "epoch": 2.1777777777777776,
      "grad_norm": 193.81893920898438,
      "learning_rate": 4.292929292929293e-05,
      "loss": 0.3744,
      "step": 50
    },
    {
      "epoch": 2.6222222222222222,
      "grad_norm": 42.527183532714844,
      "learning_rate": 4.0404040404040405e-05,
      "loss": 0.4368,
      "step": 60
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8981937602627258,
      "eval_loss": 0.3057605028152466,
      "eval_runtime": 67.9327,
      "eval_samples_per_second": 8.965,
      "eval_steps_per_second": 0.574,
      "step": 69
    },
    {
      "epoch": 3.0444444444444443,
      "grad_norm": 112.69408416748047,
      "learning_rate": 3.787878787878788e-05,
      "loss": 0.4417,
      "step": 70
    },
    {
      "epoch": 3.488888888888889,
      "grad_norm": 51.07742691040039,
      "learning_rate": 3.535353535353535e-05,
      "loss": 0.3974,
      "step": 80
    },
    {
      "epoch": 3.9333333333333336,
      "grad_norm": 114.08860778808594,
      "learning_rate": 3.282828282828283e-05,
      "loss": 0.4123,
      "step": 90
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8045977011494253,
      "eval_loss": 0.41477170586586,
      "eval_runtime": 67.1988,
      "eval_samples_per_second": 9.063,
      "eval_steps_per_second": 0.58,
      "step": 92
    },
    {
      "epoch": 4.355555555555555,
      "grad_norm": 57.1572265625,
      "learning_rate": 3.0303030303030306e-05,
      "loss": 0.2962,
      "step": 100
    },
    {
      "epoch": 4.8,
      "grad_norm": 73.67029571533203,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.3301,
      "step": 110
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8735632183908046,
      "eval_loss": 0.35200509428977966,
      "eval_runtime": 67.892,
      "eval_samples_per_second": 8.97,
      "eval_steps_per_second": 0.574,
      "step": 115
    },
    {
      "epoch": 5.222222222222222,
      "grad_norm": 72.7608871459961,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.2722,
      "step": 120
    },
    {
      "epoch": 5.666666666666667,
      "grad_norm": 54.74464797973633,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.2907,
      "step": 130
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8440065681444991,
      "eval_loss": 0.441454142332077,
      "eval_runtime": 67.5009,
      "eval_samples_per_second": 9.022,
      "eval_steps_per_second": 0.578,
      "step": 138
    },
    {
      "epoch": 6.088888888888889,
      "grad_norm": 79.50174713134766,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.2261,
      "step": 140
    },
    {
      "epoch": 6.533333333333333,
      "grad_norm": 89.56454467773438,
      "learning_rate": 1.7676767676767676e-05,
      "loss": 0.2502,
      "step": 150
    },
    {
      "epoch": 6.977777777777778,
      "grad_norm": 181.8550567626953,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.2809,
      "step": 160
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.7520525451559934,
      "eval_loss": 0.5785913467407227,
      "eval_runtime": 74.2006,
      "eval_samples_per_second": 8.207,
      "eval_steps_per_second": 0.526,
      "step": 161
    },
    {
      "epoch": 7.4,
      "grad_norm": 53.69293212890625,
      "learning_rate": 1.2626262626262628e-05,
      "loss": 0.2154,
      "step": 170
    },
    {
      "epoch": 7.844444444444444,
      "grad_norm": 48.94661331176758,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.2243,
      "step": 180
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8752052545155994,
      "eval_loss": 0.4724029004573822,
      "eval_runtime": 70.7275,
      "eval_samples_per_second": 8.611,
      "eval_steps_per_second": 0.551,
      "step": 184
    },
    {
      "epoch": 8.266666666666667,
      "grad_norm": 52.60847854614258,
      "learning_rate": 7.5757575757575764e-06,
      "loss": 0.1612,
      "step": 190
    },
    {
      "epoch": 8.71111111111111,
      "grad_norm": 54.77922058105469,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.1968,
      "step": 200
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.8702791461412152,
      "eval_loss": 0.5452057719230652,
      "eval_runtime": 69.5328,
      "eval_samples_per_second": 8.758,
      "eval_steps_per_second": 0.561,
      "step": 207
    },
    {
      "epoch": 9.133333333333333,
      "grad_norm": 76.0404281616211,
      "learning_rate": 2.5252525252525253e-06,
      "loss": 0.1506,
      "step": 210
    },
    {
      "epoch": 9.577777777777778,
      "grad_norm": 52.908023834228516,
      "learning_rate": 0.0,
      "loss": 0.1601,
      "step": 220
    },
    {
      "epoch": 9.577777777777778,
      "eval_accuracy": 0.8440065681444991,
      "eval_loss": 0.5386197566986084,
      "eval_runtime": 71.1162,
      "eval_samples_per_second": 8.563,
      "eval_steps_per_second": 0.548,
      "step": 220
    },
    {
      "epoch": 9.577777777777778,
      "step": 220,
      "total_flos": 2.7968752299762893e+17,
      "train_loss": 0.335261548649181,
      "train_runtime": 2511.8857,
      "train_samples_per_second": 5.729,
      "train_steps_per_second": 0.088
    }
  ],
  "logging_steps": 10,
  "max_steps": 220,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.7968752299762893e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}