|
{
  "best_metric": 0.9352708058124174,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finalterm/checkpoint-166",
  "epoch": 9.68421052631579,
  "eval_steps": 500,
  "global_step": 230,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 6.019707679748535,
      "learning_rate": 2.173913043478261e-05,
      "loss": 1.7902,
      "step": 10
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 5.092822551727295,
      "learning_rate": 4.347826086956522e-05,
      "loss": 1.2727,
      "step": 20
    },
    {
      "epoch": 0.968421052631579,
      "eval_accuracy": 0.8335535006605019,
      "eval_loss": 0.5535598993301392,
      "eval_runtime": 3.1512,
      "eval_samples_per_second": 240.223,
      "eval_steps_per_second": 7.616,
      "step": 23
    },
    {
      "epoch": 1.263157894736842,
      "grad_norm": 6.4061150550842285,
      "learning_rate": 4.830917874396135e-05,
      "loss": 0.5623,
      "step": 30
    },
    {
      "epoch": 1.6842105263157894,
      "grad_norm": 4.992868900299072,
      "learning_rate": 4.589371980676328e-05,
      "loss": 0.3845,
      "step": 40
    },
    {
      "epoch": 1.9789473684210526,
      "eval_accuracy": 0.9114927344782034,
      "eval_loss": 0.23858527839183807,
      "eval_runtime": 3.1195,
      "eval_samples_per_second": 242.671,
      "eval_steps_per_second": 7.694,
      "step": 47
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 5.19857120513916,
      "learning_rate": 4.347826086956522e-05,
      "loss": 0.3797,
      "step": 50
    },
    {
      "epoch": 2.526315789473684,
      "grad_norm": 6.890084743499756,
      "learning_rate": 4.106280193236715e-05,
      "loss": 0.3025,
      "step": 60
    },
    {
      "epoch": 2.9473684210526314,
      "grad_norm": 4.1722092628479,
      "learning_rate": 3.864734299516908e-05,
      "loss": 0.2725,
      "step": 70
    },
    {
      "epoch": 2.9894736842105263,
      "eval_accuracy": 0.9233817701453104,
      "eval_loss": 0.21346069872379303,
      "eval_runtime": 3.2718,
      "eval_samples_per_second": 231.37,
      "eval_steps_per_second": 7.335,
      "step": 71
    },
    {
      "epoch": 3.3684210526315788,
      "grad_norm": 3.6963953971862793,
      "learning_rate": 3.6231884057971014e-05,
      "loss": 0.2404,
      "step": 80
    },
    {
      "epoch": 3.7894736842105265,
      "grad_norm": 4.915622711181641,
      "learning_rate": 3.381642512077295e-05,
      "loss": 0.2442,
      "step": 90
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9075297225891678,
      "eval_loss": 0.2290719598531723,
      "eval_runtime": 3.1408,
      "eval_samples_per_second": 241.02,
      "eval_steps_per_second": 7.641,
      "step": 95
    },
    {
      "epoch": 4.2105263157894735,
      "grad_norm": 3.525477170944214,
      "learning_rate": 3.140096618357488e-05,
      "loss": 0.265,
      "step": 100
    },
    {
      "epoch": 4.631578947368421,
      "grad_norm": 4.751044750213623,
      "learning_rate": 2.8985507246376814e-05,
      "loss": 0.2097,
      "step": 110
    },
    {
      "epoch": 4.968421052631579,
      "eval_accuracy": 0.9207397622192867,
      "eval_loss": 0.19642269611358643,
      "eval_runtime": 3.1107,
      "eval_samples_per_second": 243.355,
      "eval_steps_per_second": 7.715,
      "step": 118
    },
    {
      "epoch": 5.052631578947368,
      "grad_norm": 6.279447078704834,
      "learning_rate": 2.6570048309178748e-05,
      "loss": 0.2359,
      "step": 120
    },
    {
      "epoch": 5.473684210526316,
      "grad_norm": 5.107997894287109,
      "learning_rate": 2.4154589371980676e-05,
      "loss": 0.2293,
      "step": 130
    },
    {
      "epoch": 5.894736842105263,
      "grad_norm": 4.217101097106934,
      "learning_rate": 2.173913043478261e-05,
      "loss": 0.2237,
      "step": 140
    },
    {
      "epoch": 5.978947368421053,
      "eval_accuracy": 0.9286657859973579,
      "eval_loss": 0.19202813506126404,
      "eval_runtime": 3.1318,
      "eval_samples_per_second": 241.714,
      "eval_steps_per_second": 7.663,
      "step": 142
    },
    {
      "epoch": 6.315789473684211,
      "grad_norm": 4.676567077636719,
      "learning_rate": 1.932367149758454e-05,
      "loss": 0.1789,
      "step": 150
    },
    {
      "epoch": 6.7368421052631575,
      "grad_norm": 4.55054235458374,
      "learning_rate": 1.6908212560386476e-05,
      "loss": 0.2199,
      "step": 160
    },
    {
      "epoch": 6.989473684210527,
      "eval_accuracy": 0.9352708058124174,
      "eval_loss": 0.1843554824590683,
      "eval_runtime": 3.1189,
      "eval_samples_per_second": 242.711,
      "eval_steps_per_second": 7.695,
      "step": 166
    },
    {
      "epoch": 7.157894736842105,
      "grad_norm": 4.944462776184082,
      "learning_rate": 1.4492753623188407e-05,
      "loss": 0.2016,
      "step": 170
    },
    {
      "epoch": 7.578947368421053,
      "grad_norm": 4.52495813369751,
      "learning_rate": 1.2077294685990338e-05,
      "loss": 0.1954,
      "step": 180
    },
    {
      "epoch": 8.0,
      "grad_norm": 5.276759624481201,
      "learning_rate": 9.66183574879227e-06,
      "loss": 0.2209,
      "step": 190
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9273447820343461,
      "eval_loss": 0.18566328287124634,
      "eval_runtime": 3.1379,
      "eval_samples_per_second": 241.243,
      "eval_steps_per_second": 7.648,
      "step": 190
    },
    {
      "epoch": 8.421052631578947,
      "grad_norm": 4.787791728973389,
      "learning_rate": 7.246376811594203e-06,
      "loss": 0.1914,
      "step": 200
    },
    {
      "epoch": 8.842105263157894,
      "grad_norm": 4.165464401245117,
      "learning_rate": 4.830917874396135e-06,
      "loss": 0.1717,
      "step": 210
    },
    {
      "epoch": 8.968421052631578,
      "eval_accuracy": 0.9313077939233818,
      "eval_loss": 0.18422812223434448,
      "eval_runtime": 3.1181,
      "eval_samples_per_second": 242.779,
      "eval_steps_per_second": 7.697,
      "step": 213
    },
    {
      "epoch": 9.263157894736842,
      "grad_norm": 2.85178279876709,
      "learning_rate": 2.4154589371980677e-06,
      "loss": 0.1532,
      "step": 220
    },
    {
      "epoch": 9.68421052631579,
      "grad_norm": 4.912642478942871,
      "learning_rate": 0.0,
      "loss": 0.1754,
      "step": 230
    },
    {
      "epoch": 9.68421052631579,
      "eval_accuracy": 0.9313077939233818,
      "eval_loss": 0.18370747566223145,
      "eval_runtime": 3.1098,
      "eval_samples_per_second": 243.427,
      "eval_steps_per_second": 7.718,
      "step": 230
    },
    {
      "epoch": 9.68421052631579,
      "step": 230,
      "total_flos": 7.291573574754632e+17,
      "train_loss": 0.36177816701971965,
      "train_runtime": 310.2655,
      "train_samples_per_second": 97.594,
      "train_steps_per_second": 0.741
    }
  ],
  "logging_steps": 10,
  "max_steps": 230,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.291573574754632e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
|
|