{
  "best_metric": 0.43546666666666667,
  "best_model_checkpoint": "./vit_finetuned_models_dataset/CIFAR100/50_from_100/facebook_dino-vitb16/model_idx_0032/checkpoints/checkpoint-2664",
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 2664,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": Infinity,
      "learning_rate": 0.0003,
      "loss": 3.2976,
      "step": 333
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.22826666666666667,
      "eval_loss": 2.8975749015808105,
      "eval_runtime": 13.0548,
      "eval_samples_per_second": 287.251,
      "eval_steps_per_second": 4.519,
      "step": 333
    },
    {
      "epoch": 2.0,
      "grad_norm": 21.59611701965332,
      "learning_rate": 0.0003,
      "loss": 2.7505,
      "step": 666
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.2928,
      "eval_loss": 2.630790948867798,
      "eval_runtime": 11.9992,
      "eval_samples_per_second": 312.521,
      "eval_steps_per_second": 4.917,
      "step": 666
    },
    {
      "epoch": 3.0,
      "grad_norm": 24.062456130981445,
      "learning_rate": 0.0003,
      "loss": 2.5177,
      "step": 999
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.3138666666666667,
      "eval_loss": 2.5008890628814697,
      "eval_runtime": 12.175,
      "eval_samples_per_second": 308.007,
      "eval_steps_per_second": 4.846,
      "step": 999
    },
    {
      "epoch": 4.0,
      "grad_norm": 27.035898208618164,
      "learning_rate": 0.0003,
      "loss": 2.3505,
      "step": 1332
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.3344,
      "eval_loss": 2.4544920921325684,
      "eval_runtime": 12.5532,
      "eval_samples_per_second": 298.728,
      "eval_steps_per_second": 4.7,
      "step": 1332
    },
    {
      "epoch": 5.0,
      "grad_norm": 24.694316864013672,
      "learning_rate": 0.0003,
      "loss": 2.1837,
      "step": 1665
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.3658666666666667,
      "eval_loss": 2.276373863220215,
      "eval_runtime": 12.1772,
      "eval_samples_per_second": 307.952,
      "eval_steps_per_second": 4.845,
      "step": 1665
    },
    {
      "epoch": 6.0,
      "grad_norm": 16.867450714111328,
      "learning_rate": 0.0003,
      "loss": 2.0426,
      "step": 1998
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.3973333333333333,
      "eval_loss": 2.1897079944610596,
      "eval_runtime": 12.1427,
      "eval_samples_per_second": 308.829,
      "eval_steps_per_second": 4.859,
      "step": 1998
    },
    {
      "epoch": 7.0,
      "grad_norm": 34.3739013671875,
      "learning_rate": 0.0003,
      "loss": 1.9154,
      "step": 2331
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.41573333333333334,
      "eval_loss": 2.1101248264312744,
      "eval_runtime": 12.2551,
      "eval_samples_per_second": 305.996,
      "eval_steps_per_second": 4.814,
      "step": 2331
    },
    {
      "epoch": 8.0,
      "grad_norm": Infinity,
      "learning_rate": 0.0003,
      "loss": 1.8021,
      "step": 2664
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.43546666666666667,
      "eval_loss": 2.014970302581787,
      "eval_runtime": 13.8233,
      "eval_samples_per_second": 271.281,
      "eval_steps_per_second": 4.268,
      "step": 2664
    }
  ],
  "logging_steps": 500,
  "max_steps": 2997,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.317930564980736e+19,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}