{
  "best_metric": 0.9397333333333333,
  "best_model_checkpoint": "./vit_finetuned_models_dataset/CIFAR100/50_from_100/facebook_dino-vitb16/model_idx_0514/checkpoints/checkpoint-2664",
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 2664,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": Infinity,
      "learning_rate": 4.851018993780429e-05,
      "loss": 0.8809,
      "step": 333
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8861333333333333,
      "eval_loss": 0.37876641750335693,
      "eval_runtime": 4.5109,
      "eval_samples_per_second": 831.32,
      "eval_steps_per_second": 13.079,
      "step": 333
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.03563673049211502,
      "learning_rate": 4.4184758936602036e-05,
      "loss": 0.2394,
      "step": 666
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8896,
      "eval_loss": 0.37777596712112427,
      "eval_runtime": 4.5676,
      "eval_samples_per_second": 821.009,
      "eval_steps_per_second": 12.917,
      "step": 666
    },
    {
      "epoch": 3.0,
      "grad_norm": 54.55916976928711,
      "learning_rate": 3.7545362870758595e-05,
      "loss": 0.1152,
      "step": 999
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9096,
      "eval_loss": 0.34404507279396057,
      "eval_runtime": 5.3039,
      "eval_samples_per_second": 707.025,
      "eval_steps_per_second": 11.124,
      "step": 999
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.11908464878797531,
      "learning_rate": 2.9392810892864702e-05,
      "loss": 0.0568,
      "step": 1332
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9128,
      "eval_loss": 0.33645790815353394,
      "eval_runtime": 4.6539,
      "eval_samples_per_second": 805.768,
      "eval_steps_per_second": 12.677,
      "step": 1332
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.027070844545960426,
      "learning_rate": 2.0710421090307246e-05,
      "loss": 0.0286,
      "step": 1665
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9168,
      "eval_loss": 0.32356399297714233,
      "eval_runtime": 5.0894,
      "eval_samples_per_second": 736.83,
      "eval_steps_per_second": 11.593,
      "step": 1665
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.0029073324985802174,
      "learning_rate": 1.2545417811701016e-05,
      "loss": 0.0091,
      "step": 1998
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9261333333333334,
      "eval_loss": 0.32211950421333313,
      "eval_runtime": 4.4465,
      "eval_samples_per_second": 843.369,
      "eval_steps_per_second": 13.269,
      "step": 1998
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.0026934300549328327,
      "learning_rate": 5.882620955060414e-06,
      "loss": 0.0031,
      "step": 2331
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9362666666666667,
      "eval_loss": 0.2789290249347687,
      "eval_runtime": 5.0466,
      "eval_samples_per_second": 743.077,
      "eval_steps_per_second": 11.691,
      "step": 2331
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.018637239933013916,
      "learning_rate": 1.5256621537052152e-06,
      "loss": 0.0011,
      "step": 2664
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9397333333333333,
      "eval_loss": 0.26498427987098694,
      "eval_runtime": 4.4907,
      "eval_samples_per_second": 835.053,
      "eval_steps_per_second": 13.138,
      "step": 2664
    }
  ],
  "logging_steps": 500,
  "max_steps": 2997,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.317930564980736e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}