{
  "best_metric": 0.040399424731731415,
  "best_model_checkpoint": "doc-topic-model_eval-04_train-02/checkpoint-7000",
  "epoch": 3.4516765285996054,
  "eval_steps": 1000,
  "global_step": 7000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.3822348415851593,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.1664,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.40402480959892273,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0944,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.981496987239155,
      "eval_f1": 0.0,
      "eval_loss": 0.08949843794107437,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.0575,
      "eval_samples_per_second": 672.857,
      "eval_steps_per_second": 2.654,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.385586678981781,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.0863,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.3922920525074005,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0769,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.981509738166785,
      "eval_f1": 0.0013773050730736859,
      "eval_loss": 0.0685219094157219,
      "eval_precision": 1.0,
      "eval_recall": 0.0006891271056661562,
      "eval_runtime": 14.5319,
      "eval_samples_per_second": 558.288,
      "eval_steps_per_second": 2.202,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.40515828132629395,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.0677,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.3946343958377838,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0607,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.9822932118311607,
      "eval_f1": 0.10664760543245175,
      "eval_loss": 0.05599680170416832,
      "eval_precision": 0.8021505376344086,
      "eval_recall": 0.057120980091883614,
      "eval_runtime": 14.3217,
      "eval_samples_per_second": 566.482,
      "eval_steps_per_second": 2.234,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.35836198925971985,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.0564,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.385370135307312,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.0535,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.9845742111071913,
      "eval_f1": 0.3747559434937407,
      "eval_loss": 0.050051942467689514,
      "eval_precision": 0.7494258153422141,
      "eval_recall": 0.2498468606431853,
      "eval_runtime": 14.4421,
      "eval_samples_per_second": 561.762,
      "eval_steps_per_second": 2.216,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.3222791850566864,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0476,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.3560543656349182,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.0466,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9858323026333499,
      "eval_f1": 0.48990002040399916,
      "eval_loss": 0.04502652958035469,
      "eval_precision": 0.73380195599022,
      "eval_recall": 0.367687595712098,
      "eval_runtime": 14.4276,
      "eval_samples_per_second": 562.324,
      "eval_steps_per_second": 2.218,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.48908740282058716,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0453,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.3385593891143799,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.0441,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.9862899192582927,
      "eval_f1": 0.5084074168148336,
      "eval_loss": 0.042144790291786194,
      "eval_precision": 0.7553207547169811,
      "eval_recall": 0.38315467075038284,
      "eval_runtime": 14.4543,
      "eval_samples_per_second": 561.288,
      "eval_steps_per_second": 2.214,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.30710557103157043,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.039,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.38062548637390137,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.0391,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9867758712779688,
      "eval_f1": 0.5580910898589149,
      "eval_loss": 0.040399424731731415,
      "eval_precision": 0.7310840982386505,
      "eval_recall": 0.45130168453292496,
      "eval_runtime": 14.4294,
      "eval_samples_per_second": 562.253,
      "eval_steps_per_second": 2.218,
      "step": 7000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 157991679316152.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}