|
{
  "best_metric": 0.675394469972155,
  "best_model_checkpoint": "output/pretraining/vihealthbert-w_unsup-SynPD/lr3e-5_wr0.1_wd0.0/checkpoint-34000",
  "epoch": 5.859038428399104,
  "eval_steps": 2000,
  "global_step": 34000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00017232465965879716,
      "grad_norm": 35.608882904052734,
      "learning_rate": 5.169739789763915e-09,
      "loss": 10.7382,
      "step": 1
    },
    {
      "epoch": 0.34464931931759435,
      "grad_norm": 4.884454250335693,
      "learning_rate": 1.033947957952783e-05,
      "loss": 5.8543,
      "step": 2000
    },
    {
      "epoch": 0.34464931931759435,
      "eval_accuracy": 0.3950195480446223,
      "eval_loss": 3.896709680557251,
      "eval_runtime": 24.6709,
      "eval_samples_per_second": 396.175,
      "eval_steps_per_second": 24.766,
      "step": 2000
    },
    {
      "epoch": 0.6892986386351887,
      "grad_norm": 5.278345108032227,
      "learning_rate": 2.067895915905566e-05,
      "loss": 3.4544,
      "step": 4000
    },
    {
      "epoch": 0.6892986386351887,
      "eval_accuracy": 0.530626322082222,
      "eval_loss": 2.811899423599243,
      "eval_runtime": 24.6443,
      "eval_samples_per_second": 396.602,
      "eval_steps_per_second": 24.793,
      "step": 4000
    },
    {
      "epoch": 1.033947957952783,
      "grad_norm": 4.731122016906738,
      "learning_rate": 2.988684014015739e-05,
      "loss": 2.8312,
      "step": 6000
    },
    {
      "epoch": 1.033947957952783,
      "eval_accuracy": 0.5771132321637026,
      "eval_loss": 2.4039628505706787,
      "eval_runtime": 24.6266,
      "eval_samples_per_second": 396.889,
      "eval_steps_per_second": 24.811,
      "step": 6000
    },
    {
      "epoch": 1.3785972772703774,
      "grad_norm": 12.213573455810547,
      "learning_rate": 2.873800907576541e-05,
      "loss": 2.5914,
      "step": 8000
    },
    {
      "epoch": 1.3785972772703774,
      "eval_accuracy": 0.5349752225383133,
      "eval_loss": 2.6481692790985107,
      "eval_runtime": 24.6033,
      "eval_samples_per_second": 397.264,
      "eval_steps_per_second": 24.834,
      "step": 8000
    },
    {
      "epoch": 1.7232465965879717,
      "grad_norm": 5.172228813171387,
      "learning_rate": 2.758917801137343e-05,
      "loss": 2.5649,
      "step": 10000
    },
    {
      "epoch": 1.7232465965879717,
      "eval_accuracy": 0.6087076297937304,
      "eval_loss": 2.13348388671875,
      "eval_runtime": 24.6211,
      "eval_samples_per_second": 396.976,
      "eval_steps_per_second": 24.816,
      "step": 10000
    },
    {
      "epoch": 2.067895915905566,
      "grad_norm": 4.871993064880371,
      "learning_rate": 2.6440346946981447e-05,
      "loss": 2.2749,
      "step": 12000
    },
    {
      "epoch": 2.067895915905566,
      "eval_accuracy": 0.6281529792701336,
      "eval_loss": 1.9895449876785278,
      "eval_runtime": 24.453,
      "eval_samples_per_second": 399.705,
      "eval_steps_per_second": 24.987,
      "step": 12000
    },
    {
      "epoch": 2.4125452352231607,
      "grad_norm": 8.037242889404297,
      "learning_rate": 2.5291515882589467e-05,
      "loss": 2.1572,
      "step": 14000
    },
    {
      "epoch": 2.4125452352231607,
      "eval_accuracy": 0.6353159252095423,
      "eval_loss": 1.9312896728515625,
      "eval_runtime": 24.4658,
      "eval_samples_per_second": 399.496,
      "eval_steps_per_second": 24.974,
      "step": 14000
    },
    {
      "epoch": 2.757194554540755,
      "grad_norm": 4.320974349975586,
      "learning_rate": 2.4142684818197487e-05,
      "loss": 2.1009,
      "step": 16000
    },
    {
      "epoch": 2.757194554540755,
      "eval_accuracy": 0.6428579553856829,
      "eval_loss": 1.864585280418396,
      "eval_runtime": 24.4545,
      "eval_samples_per_second": 399.68,
      "eval_steps_per_second": 24.985,
      "step": 16000
    },
    {
      "epoch": 3.101843873858349,
      "grad_norm": 4.6416168212890625,
      "learning_rate": 2.2993853753805504e-05,
      "loss": 2.0609,
      "step": 18000
    },
    {
      "epoch": 3.101843873858349,
      "eval_accuracy": 0.6450457483325106,
      "eval_loss": 1.8571828603744507,
      "eval_runtime": 24.4634,
      "eval_samples_per_second": 399.535,
      "eval_steps_per_second": 24.976,
      "step": 18000
    },
    {
      "epoch": 3.4464931931759435,
      "grad_norm": 4.341440677642822,
      "learning_rate": 2.184502268941352e-05,
      "loss": 2.0885,
      "step": 20000
    },
    {
      "epoch": 3.4464931931759435,
      "eval_accuracy": 0.6284792764252838,
      "eval_loss": 1.9489394426345825,
      "eval_runtime": 24.6433,
      "eval_samples_per_second": 396.618,
      "eval_steps_per_second": 24.794,
      "step": 20000
    },
    {
      "epoch": 3.791142512493538,
      "grad_norm": 4.660053253173828,
      "learning_rate": 2.069619162502154e-05,
      "loss": 1.9891,
      "step": 22000
    },
    {
      "epoch": 3.791142512493538,
      "eval_accuracy": 0.6582960564701575,
      "eval_loss": 1.7700080871582031,
      "eval_runtime": 24.616,
      "eval_samples_per_second": 397.059,
      "eval_steps_per_second": 24.821,
      "step": 22000
    },
    {
      "epoch": 4.135791831811132,
      "grad_norm": 3.9445292949676514,
      "learning_rate": 1.954736056062956e-05,
      "loss": 1.9368,
      "step": 24000
    },
    {
      "epoch": 4.135791831811132,
      "eval_accuracy": 0.6608816779512006,
      "eval_loss": 1.7397598028182983,
      "eval_runtime": 24.6251,
      "eval_samples_per_second": 396.912,
      "eval_steps_per_second": 24.812,
      "step": 24000
    },
    {
      "epoch": 4.480441151128726,
      "grad_norm": 4.561331748962402,
      "learning_rate": 1.839852949623758e-05,
      "loss": 1.9003,
      "step": 26000
    },
    {
      "epoch": 4.480441151128726,
      "eval_accuracy": 0.666368433126864,
      "eval_loss": 1.7164973020553589,
      "eval_runtime": 24.4665,
      "eval_samples_per_second": 399.485,
      "eval_steps_per_second": 24.973,
      "step": 26000
    },
    {
      "epoch": 4.825090470446321,
      "grad_norm": 4.358778476715088,
      "learning_rate": 1.72496984318456e-05,
      "loss": 1.9058,
      "step": 28000
    },
    {
      "epoch": 4.825090470446321,
      "eval_accuracy": 0.666961039556835,
      "eval_loss": 1.703224539756775,
      "eval_runtime": 24.4455,
      "eval_samples_per_second": 399.829,
      "eval_steps_per_second": 24.994,
      "step": 28000
    },
    {
      "epoch": 5.1697397897639155,
      "grad_norm": 4.522342205047607,
      "learning_rate": 1.6100867367453616e-05,
      "loss": 1.859,
      "step": 30000
    },
    {
      "epoch": 5.1697397897639155,
      "eval_accuracy": 0.6717793867023528,
      "eval_loss": 1.677147626876831,
      "eval_runtime": 24.4697,
      "eval_samples_per_second": 399.432,
      "eval_steps_per_second": 24.97,
      "step": 30000
    },
    {
      "epoch": 5.51438910908151,
      "grad_norm": 4.162671089172363,
      "learning_rate": 1.4952036303061636e-05,
      "loss": 1.8401,
      "step": 32000
    },
    {
      "epoch": 5.51438910908151,
      "eval_accuracy": 0.6709595439247201,
      "eval_loss": 1.665189504623413,
      "eval_runtime": 24.6261,
      "eval_samples_per_second": 396.897,
      "eval_steps_per_second": 24.811,
      "step": 32000
    },
    {
      "epoch": 5.859038428399104,
      "grad_norm": 4.618478775024414,
      "learning_rate": 1.3803205238669653e-05,
      "loss": 1.8181,
      "step": 34000
    },
    {
      "epoch": 5.859038428399104,
      "eval_accuracy": 0.675394469972155,
      "eval_loss": 1.6417487859725952,
      "eval_runtime": 24.5944,
      "eval_samples_per_second": 397.408,
      "eval_steps_per_second": 24.843,
      "step": 34000
    }
  ],
  "logging_steps": 2000,
  "max_steps": 58030,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 2000,
  "total_flos": 0.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
|
|