{
  "best_metric": 0.9458388375165125,
  "best_model_checkpoint": "swin-tiny-patch4-window7-224-finalterm/checkpoint-213",
  "epoch": 9.68421052631579,
  "eval_steps": 500,
  "global_step": 230,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 4.606943607330322,
      "learning_rate": 2.173913043478261e-05,
      "loss": 1.8065,
      "step": 10
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 4.823718547821045,
      "learning_rate": 4.347826086956522e-05,
      "loss": 1.3107,
      "step": 20
    },
    {
      "epoch": 0.968421052631579,
      "eval_accuracy": 0.8348745046235139,
      "eval_loss": 0.6132236123085022,
      "eval_runtime": 3.1042,
      "eval_samples_per_second": 243.862,
      "eval_steps_per_second": 7.731,
      "step": 23
    },
    {
      "epoch": 1.263157894736842,
      "grad_norm": 6.365540504455566,
      "learning_rate": 4.830917874396135e-05,
      "loss": 0.6282,
      "step": 30
    },
    {
      "epoch": 1.6842105263157894,
      "grad_norm": 6.270587921142578,
      "learning_rate": 4.589371980676328e-05,
      "loss": 0.4128,
      "step": 40
    },
    {
      "epoch": 1.9789473684210526,
      "eval_accuracy": 0.9233817701453104,
      "eval_loss": 0.25043442845344543,
      "eval_runtime": 3.103,
      "eval_samples_per_second": 243.96,
      "eval_steps_per_second": 7.735,
      "step": 47
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 5.101744651794434,
      "learning_rate": 4.347826086956522e-05,
      "loss": 0.3978,
      "step": 50
    },
    {
      "epoch": 2.526315789473684,
      "grad_norm": 7.869506359100342,
      "learning_rate": 4.106280193236715e-05,
      "loss": 0.2573,
      "step": 60
    },
    {
      "epoch": 2.9473684210526314,
      "grad_norm": 5.02071475982666,
      "learning_rate": 3.864734299516908e-05,
      "loss": 0.3188,
      "step": 70
    },
    {
      "epoch": 2.9894736842105263,
      "eval_accuracy": 0.9313077939233818,
      "eval_loss": 0.20792996883392334,
      "eval_runtime": 3.1416,
      "eval_samples_per_second": 240.957,
      "eval_steps_per_second": 7.639,
      "step": 71
    },
    {
      "epoch": 3.3684210526315788,
      "grad_norm": 5.323513031005859,
      "learning_rate": 3.6231884057971014e-05,
      "loss": 0.2418,
      "step": 80
    },
    {
      "epoch": 3.7894736842105265,
      "grad_norm": 5.30533504486084,
      "learning_rate": 3.381642512077295e-05,
      "loss": 0.2457,
      "step": 90
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9286657859973579,
      "eval_loss": 0.19689106941223145,
      "eval_runtime": 3.1441,
      "eval_samples_per_second": 240.768,
      "eval_steps_per_second": 7.633,
      "step": 95
    },
    {
      "epoch": 4.2105263157894735,
      "grad_norm": 3.670581817626953,
      "learning_rate": 3.140096618357488e-05,
      "loss": 0.2736,
      "step": 100
    },
    {
      "epoch": 4.631578947368421,
      "grad_norm": 3.689314365386963,
      "learning_rate": 2.8985507246376814e-05,
      "loss": 0.238,
      "step": 110
    },
    {
      "epoch": 4.968421052631579,
      "eval_accuracy": 0.9379128137384413,
      "eval_loss": 0.1951446682214737,
      "eval_runtime": 3.1119,
      "eval_samples_per_second": 243.263,
      "eval_steps_per_second": 7.712,
      "step": 118
    },
    {
      "epoch": 5.052631578947368,
      "grad_norm": 6.361433982849121,
      "learning_rate": 2.6570048309178748e-05,
      "loss": 0.248,
      "step": 120
    },
    {
      "epoch": 5.473684210526316,
      "grad_norm": 4.373514652252197,
      "learning_rate": 2.4154589371980676e-05,
      "loss": 0.2051,
      "step": 130
    },
    {
      "epoch": 5.894736842105263,
      "grad_norm": 4.400747776031494,
      "learning_rate": 2.173913043478261e-05,
      "loss": 0.2,
      "step": 140
    },
    {
      "epoch": 5.978947368421053,
      "eval_accuracy": 0.9418758256274768,
      "eval_loss": 0.17091834545135498,
      "eval_runtime": 3.4141,
      "eval_samples_per_second": 221.725,
      "eval_steps_per_second": 7.03,
      "step": 142
    },
    {
      "epoch": 6.315789473684211,
      "grad_norm": 4.114778518676758,
      "learning_rate": 1.932367149758454e-05,
      "loss": 0.1971,
      "step": 150
    },
    {
      "epoch": 6.7368421052631575,
      "grad_norm": 5.723143577575684,
      "learning_rate": 1.6908212560386476e-05,
      "loss": 0.2037,
      "step": 160
    },
    {
      "epoch": 6.989473684210527,
      "eval_accuracy": 0.9431968295904888,
      "eval_loss": 0.17441698908805847,
      "eval_runtime": 3.1436,
      "eval_samples_per_second": 240.806,
      "eval_steps_per_second": 7.635,
      "step": 166
    },
    {
      "epoch": 7.157894736842105,
      "grad_norm": 5.644651412963867,
      "learning_rate": 1.4492753623188407e-05,
      "loss": 0.1939,
      "step": 170
    },
    {
      "epoch": 7.578947368421053,
      "grad_norm": 5.06740665435791,
      "learning_rate": 1.2077294685990338e-05,
      "loss": 0.203,
      "step": 180
    },
    {
      "epoch": 8.0,
      "grad_norm": 3.859252452850342,
      "learning_rate": 9.66183574879227e-06,
      "loss": 0.1818,
      "step": 190
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9418758256274768,
      "eval_loss": 0.16339561343193054,
      "eval_runtime": 3.2688,
      "eval_samples_per_second": 231.582,
      "eval_steps_per_second": 7.342,
      "step": 190
    },
    {
      "epoch": 8.421052631578947,
      "grad_norm": 6.130388259887695,
      "learning_rate": 7.246376811594203e-06,
      "loss": 0.1662,
      "step": 200
    },
    {
      "epoch": 8.842105263157894,
      "grad_norm": 4.869731426239014,
      "learning_rate": 4.830917874396135e-06,
      "loss": 0.1838,
      "step": 210
    },
    {
      "epoch": 8.968421052631578,
      "eval_accuracy": 0.9458388375165125,
      "eval_loss": 0.16089674830436707,
      "eval_runtime": 3.1248,
      "eval_samples_per_second": 242.252,
      "eval_steps_per_second": 7.68,
      "step": 213
    },
    {
      "epoch": 9.263157894736842,
      "grad_norm": 4.596017837524414,
      "learning_rate": 2.4154589371980677e-06,
      "loss": 0.1765,
      "step": 220
    },
    {
      "epoch": 9.68421052631579,
      "grad_norm": 4.615110397338867,
      "learning_rate": 0.0,
      "loss": 0.1769,
      "step": 230
    },
    {
      "epoch": 9.68421052631579,
      "eval_accuracy": 0.9431968295904888,
      "eval_loss": 0.1606564074754715,
      "eval_runtime": 3.1146,
      "eval_samples_per_second": 243.05,
      "eval_steps_per_second": 7.706,
      "step": 230
    },
    {
      "epoch": 9.68421052631579,
      "step": 230,
      "total_flos": 7.291573574754632e+17,
      "train_loss": 0.36814911676489787,
      "train_runtime": 310.0803,
      "train_samples_per_second": 97.652,
      "train_steps_per_second": 0.742
    }
  ],
  "logging_steps": 10,
  "max_steps": 230,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.291573574754632e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}