{
  "best_metric": 0.8717948717948718,
  "best_model_checkpoint": "deit-base-patch16-224-finetuned-lora-medmnistv2/checkpoint-43",
  "epoch": 9.142857142857142,
  "eval_steps": 500,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9142857142857143,
      "eval_accuracy": 0.7435897435897436,
      "eval_f1": 0.47082767978290374,
      "eval_loss": 0.5025883913040161,
      "eval_precision": 0.8701298701298701,
      "eval_recall": 0.5238095238095238,
      "eval_runtime": 0.3964,
      "eval_samples_per_second": 196.791,
      "eval_steps_per_second": 12.615,
      "step": 8
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 2.9126269817352295,
      "learning_rate": 0.004375,
      "loss": 0.6168,
      "step": 10
    },
    {
      "epoch": 1.9428571428571428,
      "eval_accuracy": 0.8461538461538461,
      "eval_f1": 0.7833333333333333,
      "eval_loss": 0.4762480854988098,
      "eval_precision": 0.8285714285714285,
      "eval_recall": 0.7593984962406015,
      "eval_runtime": 0.4194,
      "eval_samples_per_second": 185.972,
      "eval_steps_per_second": 11.921,
      "step": 17
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 4.246772289276123,
      "learning_rate": 0.00375,
      "loss": 0.5954,
      "step": 20
    },
    {
      "epoch": 2.9714285714285715,
      "eval_accuracy": 0.7307692307692307,
      "eval_f1": 0.42222222222222217,
      "eval_loss": 0.5305488109588623,
      "eval_precision": 0.36538461538461536,
      "eval_recall": 0.5,
      "eval_runtime": 0.3621,
      "eval_samples_per_second": 215.403,
      "eval_steps_per_second": 13.808,
      "step": 26
    },
    {
      "epoch": 3.4285714285714284,
      "grad_norm": 0.7361263036727905,
      "learning_rate": 0.003125,
      "loss": 0.5934,
      "step": 30
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7692307692307693,
      "eval_f1": 0.5846153846153846,
      "eval_loss": 0.4789685606956482,
      "eval_precision": 0.7835616438356164,
      "eval_recall": 0.5864661654135338,
      "eval_runtime": 0.3661,
      "eval_samples_per_second": 213.032,
      "eval_steps_per_second": 13.656,
      "step": 35
    },
    {
      "epoch": 4.571428571428571,
      "grad_norm": 2.504427909851074,
      "learning_rate": 0.0025,
      "loss": 0.526,
      "step": 40
    },
    {
      "epoch": 4.914285714285715,
      "eval_accuracy": 0.8717948717948718,
      "eval_f1": 0.8194444444444444,
      "eval_loss": 0.3693440556526184,
      "eval_precision": 0.8698412698412699,
      "eval_recall": 0.7919799498746867,
      "eval_runtime": 0.3764,
      "eval_samples_per_second": 207.212,
      "eval_steps_per_second": 13.283,
      "step": 43
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 1.3234041929244995,
      "learning_rate": 0.001875,
      "loss": 0.4651,
      "step": 50
    },
    {
      "epoch": 5.942857142857143,
      "eval_accuracy": 0.7948717948717948,
      "eval_f1": 0.7533596837944665,
      "eval_loss": 0.47893068194389343,
      "eval_precision": 0.7433962264150944,
      "eval_recall": 0.7694235588972431,
      "eval_runtime": 0.4225,
      "eval_samples_per_second": 184.594,
      "eval_steps_per_second": 11.833,
      "step": 52
    },
    {
      "epoch": 6.857142857142857,
      "grad_norm": 0.49008217453956604,
      "learning_rate": 0.00125,
      "loss": 0.493,
      "step": 60
    },
    {
      "epoch": 6.9714285714285715,
      "eval_accuracy": 0.8205128205128205,
      "eval_f1": 0.7564674397859055,
      "eval_loss": 0.4186874330043793,
      "eval_precision": 0.7791706846673095,
      "eval_recall": 0.7418546365914787,
      "eval_runtime": 0.367,
      "eval_samples_per_second": 212.543,
      "eval_steps_per_second": 13.625,
      "step": 61
    },
    {
      "epoch": 8.0,
      "grad_norm": 1.9268220663070679,
      "learning_rate": 0.000625,
      "loss": 0.4337,
      "step": 70
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8589743589743589,
      "eval_f1": 0.8051328639563933,
      "eval_loss": 0.3599574863910675,
      "eval_precision": 0.841733870967742,
      "eval_recall": 0.7832080200501252,
      "eval_runtime": 0.3708,
      "eval_samples_per_second": 210.383,
      "eval_steps_per_second": 13.486,
      "step": 70
    },
    {
      "epoch": 8.914285714285715,
      "eval_accuracy": 0.8717948717948718,
      "eval_f1": 0.8260481712756467,
      "eval_loss": 0.3467828929424286,
      "eval_precision": 0.854387656702025,
      "eval_recall": 0.8070175438596491,
      "eval_runtime": 0.4353,
      "eval_samples_per_second": 179.205,
      "eval_steps_per_second": 11.487,
      "step": 78
    },
    {
      "epoch": 9.142857142857142,
      "grad_norm": 0.5700221657752991,
      "learning_rate": 0.0,
      "loss": 0.418,
      "step": 80
    },
    {
      "epoch": 9.142857142857142,
      "eval_accuracy": 0.8717948717948718,
      "eval_f1": 0.8194444444444444,
      "eval_loss": 0.3453872501850128,
      "eval_precision": 0.8698412698412699,
      "eval_recall": 0.7919799498746867,
      "eval_runtime": 0.4424,
      "eval_samples_per_second": 176.325,
      "eval_steps_per_second": 11.303,
      "step": 80
    },
    {
      "epoch": 9.142857142857142,
      "step": 80,
      "total_flos": 3.8966228771394355e+17,
      "train_loss": 0.5176919877529145,
      "train_runtime": 49.4572,
      "train_samples_per_second": 110.398,
      "train_steps_per_second": 1.618
    }
  ],
  "logging_steps": 10,
  "max_steps": 80,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.8966228771394355e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}