{
  "best_metric": 0.03805249184370041,
  "best_model_checkpoint": "doc-topic-model_eval-01_train-02/checkpoint-10000",
  "epoch": 4.930966469428008,
  "eval_steps": 1000,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.3822348415851593,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.1664,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.40402480959892273,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0944,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.981383845685049,
      "eval_f1": 0.0,
      "eval_loss": 0.089978888630867,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 11.9647,
      "eval_samples_per_second": 677.827,
      "eval_steps_per_second": 2.675,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.385586678981781,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.0863,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.3922920525074005,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0769,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.9813966013294216,
      "eval_f1": 0.0015213753232922562,
      "eval_loss": 0.06878085434436798,
      "eval_precision": 0.9090909090909091,
      "eval_recall": 0.0007613247049866769,
      "eval_runtime": 14.1244,
      "eval_samples_per_second": 574.184,
      "eval_steps_per_second": 2.266,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.40515828132629395,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.0677,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.3946343958377838,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0607,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.982203041512536,
      "eval_f1": 0.10543563439481371,
      "eval_loss": 0.05613817274570465,
      "eval_precision": 0.8203991130820399,
      "eval_recall": 0.056338028169014086,
      "eval_runtime": 14.2341,
      "eval_samples_per_second": 569.76,
      "eval_steps_per_second": 2.248,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.35836198925971985,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.0564,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.385370135307312,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.0535,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.9844537040973964,
      "eval_f1": 0.36825433392846857,
      "eval_loss": 0.05005401372909546,
      "eval_precision": 0.7561494796594135,
      "eval_recall": 0.24339550818424058,
      "eval_runtime": 14.2688,
      "eval_samples_per_second": 568.374,
      "eval_steps_per_second": 2.243,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.3222791850566864,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0476,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.3560543656349182,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.0466,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9856697421942543,
      "eval_f1": 0.48531432934588953,
      "eval_loss": 0.04508376494050026,
      "eval_precision": 0.7322580645161291,
      "eval_recall": 0.3629234868671488,
      "eval_runtime": 14.2501,
      "eval_samples_per_second": 569.119,
      "eval_steps_per_second": 2.246,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.48908740282058716,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0453,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.3385593891143799,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.0441,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.9862465807786612,
      "eval_f1": 0.5088571717785201,
      "eval_loss": 0.04229620099067688,
      "eval_precision": 0.7590215914238261,
      "eval_recall": 0.3827179291968024,
      "eval_runtime": 14.2621,
      "eval_samples_per_second": 568.641,
      "eval_steps_per_second": 2.244,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.30710557103157043,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.039,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.38062548637390137,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.0391,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9865994869396375,
      "eval_f1": 0.5537779036292416,
      "eval_loss": 0.0406394861638546,
      "eval_precision": 0.728457909113484,
      "eval_recall": 0.4466692044156833,
      "eval_runtime": 14.3173,
      "eval_samples_per_second": 566.446,
      "eval_steps_per_second": 2.235,
      "step": 7000
    },
    {
      "epoch": 3.698224852071006,
      "grad_norm": 0.5038283467292786,
      "learning_rate": 1.92603550295858e-05,
      "loss": 0.0386,
      "step": 7500
    },
    {
      "epoch": 3.9447731755424065,
      "grad_norm": 0.3748466968536377,
      "learning_rate": 1.921104536489152e-05,
      "loss": 0.0372,
      "step": 8000
    },
    {
      "epoch": 3.9447731755424065,
      "eval_accuracy": 0.9869070396983999,
      "eval_f1": 0.5536766837375592,
      "eval_loss": 0.03953782096505165,
      "eval_precision": 0.757635858786196,
      "eval_recall": 0.43623905595736584,
      "eval_runtime": 14.3803,
      "eval_samples_per_second": 563.966,
      "eval_steps_per_second": 2.225,
      "step": 8000
    },
    {
      "epoch": 4.191321499013807,
      "grad_norm": 0.4667779505252838,
      "learning_rate": 1.916173570019724e-05,
      "loss": 0.0357,
      "step": 8500
    },
    {
      "epoch": 4.437869822485207,
      "grad_norm": 0.341769278049469,
      "learning_rate": 1.911242603550296e-05,
      "loss": 0.0336,
      "step": 9000
    },
    {
      "epoch": 4.437869822485207,
      "eval_accuracy": 0.9870884533072551,
      "eval_f1": 0.5703640822486323,
      "eval_loss": 0.03866717591881752,
      "eval_precision": 0.7494113273020201,
      "eval_recall": 0.46037304910544347,
      "eval_runtime": 14.1469,
      "eval_samples_per_second": 573.27,
      "eval_steps_per_second": 2.262,
      "step": 9000
    },
    {
      "epoch": 4.684418145956608,
      "grad_norm": 0.4473079442977905,
      "learning_rate": 1.906311637080868e-05,
      "loss": 0.0336,
      "step": 9500
    },
    {
      "epoch": 4.930966469428008,
      "grad_norm": 0.4554208219051361,
      "learning_rate": 1.90138067061144e-05,
      "loss": 0.0337,
      "step": 10000
    },
    {
      "epoch": 4.930966469428008,
      "eval_accuracy": 0.9872131751633431,
      "eval_f1": 0.5864882207351728,
      "eval_loss": 0.03805249184370041,
      "eval_precision": 0.7368421052631579,
      "eval_recall": 0.48709554625047585,
      "eval_runtime": 14.2923,
      "eval_samples_per_second": 567.44,
      "eval_steps_per_second": 2.239,
      "step": 10000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 225936279463716.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}