{
  "best_metric": 0.03680593520402908,
  "best_model_checkpoint": "doc-topic-model_eval-04_train-02/checkpoint-11000",
  "epoch": 6.410256410256411,
  "eval_steps": 1000,
  "global_step": 13000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.3822348415851593,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.1664,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.40402480959892273,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0944,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.981496987239155,
      "eval_f1": 0.0,
      "eval_loss": 0.08949843794107437,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.0575,
      "eval_samples_per_second": 672.857,
      "eval_steps_per_second": 2.654,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.385586678981781,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.0863,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.3922920525074005,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0769,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.981509738166785,
      "eval_f1": 0.0013773050730736859,
      "eval_loss": 0.0685219094157219,
      "eval_precision": 1.0,
      "eval_recall": 0.0006891271056661562,
      "eval_runtime": 14.5319,
      "eval_samples_per_second": 558.288,
      "eval_steps_per_second": 2.202,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.40515828132629395,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.0677,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.3946343958377838,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0607,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.9822932118311607,
      "eval_f1": 0.10664760543245175,
      "eval_loss": 0.05599680170416832,
      "eval_precision": 0.8021505376344086,
      "eval_recall": 0.057120980091883614,
      "eval_runtime": 14.3217,
      "eval_samples_per_second": 566.482,
      "eval_steps_per_second": 2.234,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.35836198925971985,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.0564,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.385370135307312,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.0535,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.9845742111071913,
      "eval_f1": 0.3747559434937407,
      "eval_loss": 0.050051942467689514,
      "eval_precision": 0.7494258153422141,
      "eval_recall": 0.2498468606431853,
      "eval_runtime": 14.4421,
      "eval_samples_per_second": 561.762,
      "eval_steps_per_second": 2.216,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.3222791850566864,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0476,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.3560543656349182,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.0466,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9858323026333499,
      "eval_f1": 0.48990002040399916,
      "eval_loss": 0.04502652958035469,
      "eval_precision": 0.73380195599022,
      "eval_recall": 0.367687595712098,
      "eval_runtime": 14.4276,
      "eval_samples_per_second": 562.324,
      "eval_steps_per_second": 2.218,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.48908740282058716,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0453,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.3385593891143799,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.0441,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.9862899192582927,
      "eval_f1": 0.5084074168148336,
      "eval_loss": 0.042144790291786194,
      "eval_precision": 0.7553207547169811,
      "eval_recall": 0.38315467075038284,
      "eval_runtime": 14.4543,
      "eval_samples_per_second": 561.288,
      "eval_steps_per_second": 2.214,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.30710557103157043,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.039,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.38062548637390137,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.0391,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9867758712779688,
      "eval_f1": 0.5580910898589149,
      "eval_loss": 0.040399424731731415,
      "eval_precision": 0.7310840982386505,
      "eval_recall": 0.45130168453292496,
      "eval_runtime": 14.4294,
      "eval_samples_per_second": 562.253,
      "eval_steps_per_second": 2.218,
      "step": 7000
    },
    {
      "epoch": 3.698224852071006,
      "grad_norm": 0.5038283467292786,
      "learning_rate": 1.92603550295858e-05,
      "loss": 0.0386,
      "step": 7500
    },
    {
      "epoch": 3.9447731755424065,
      "grad_norm": 0.3748466968536377,
      "learning_rate": 1.921104536489152e-05,
      "loss": 0.0372,
      "step": 8000
    },
    {
      "epoch": 3.9447731755424065,
      "eval_accuracy": 0.9870223892121485,
      "eval_f1": 0.5567598954804994,
      "eval_loss": 0.039298899471759796,
      "eval_precision": 0.7563765448330265,
      "eval_recall": 0.4405053598774885,
      "eval_runtime": 14.3787,
      "eval_samples_per_second": 564.236,
      "eval_steps_per_second": 2.226,
      "step": 8000
    },
    {
      "epoch": 4.191321499013807,
      "grad_norm": 0.4667779505252838,
      "learning_rate": 1.916173570019724e-05,
      "loss": 0.0357,
      "step": 8500
    },
    {
      "epoch": 4.437869822485207,
      "grad_norm": 0.341769278049469,
      "learning_rate": 1.911242603550296e-05,
      "loss": 0.0336,
      "step": 9000
    },
    {
      "epoch": 4.437869822485207,
      "eval_accuracy": 0.9872292375937016,
      "eval_f1": 0.5748514291104613,
      "eval_loss": 0.03821622207760811,
      "eval_precision": 0.7484647506755097,
      "eval_recall": 0.4666156202143951,
      "eval_runtime": 14.4085,
      "eval_samples_per_second": 563.072,
      "eval_steps_per_second": 2.221,
      "step": 9000
    },
    {
      "epoch": 4.684418145956608,
      "grad_norm": 0.4473079442977905,
      "learning_rate": 1.906311637080868e-05,
      "loss": 0.0336,
      "step": 9500
    },
    {
      "epoch": 4.930966469428008,
      "grad_norm": 0.4554208219051361,
      "learning_rate": 1.90138067061144e-05,
      "loss": 0.0337,
      "step": 10000
    },
    {
      "epoch": 4.930966469428008,
      "eval_accuracy": 0.9874205015081514,
      "eval_f1": 0.5938429166094872,
      "eval_loss": 0.0375286303460598,
      "eval_precision": 0.7375298261561186,
      "eval_recall": 0.4970137825421133,
      "eval_runtime": 14.4989,
      "eval_samples_per_second": 559.558,
      "eval_steps_per_second": 2.207,
      "step": 10000
    },
    {
      "epoch": 5.177514792899408,
      "grad_norm": 0.4344967007637024,
      "learning_rate": 1.896459566074951e-05,
      "loss": 0.0306,
      "step": 10500
    },
    {
      "epoch": 5.424063116370808,
      "grad_norm": 0.38051480054855347,
      "learning_rate": 1.891528599605523e-05,
      "loss": 0.0297,
      "step": 11000
    },
    {
      "epoch": 5.424063116370808,
      "eval_accuracy": 0.987519675389718,
      "eval_f1": 0.6078789227687514,
      "eval_loss": 0.03680593520402908,
      "eval_precision": 0.7259968102073365,
      "eval_recall": 0.5228177641653905,
      "eval_runtime": 14.3385,
      "eval_samples_per_second": 565.819,
      "eval_steps_per_second": 2.232,
      "step": 11000
    },
    {
      "epoch": 5.670611439842209,
      "grad_norm": 0.4212055504322052,
      "learning_rate": 1.886597633136095e-05,
      "loss": 0.0301,
      "step": 11500
    },
    {
      "epoch": 5.9171597633136095,
      "grad_norm": 0.5279490351676941,
      "learning_rate": 1.881666666666667e-05,
      "loss": 0.0296,
      "step": 12000
    },
    {
      "epoch": 5.9171597633136095,
      "eval_accuracy": 0.9875210921594546,
      "eval_f1": 0.5898677593592848,
      "eval_loss": 0.037628598511219025,
      "eval_precision": 0.7526140684410646,
      "eval_recall": 0.48499234303215927,
      "eval_runtime": 14.4084,
      "eval_samples_per_second": 563.076,
      "eval_steps_per_second": 2.221,
      "step": 12000
    },
    {
      "epoch": 6.16370808678501,
      "grad_norm": 0.3991893231868744,
      "learning_rate": 1.8767455621301777e-05,
      "loss": 0.0273,
      "step": 12500
    },
    {
      "epoch": 6.410256410256411,
      "grad_norm": 0.18142229318618774,
      "learning_rate": 1.8718145956607497e-05,
      "loss": 0.0263,
      "step": 13000
    },
    {
      "epoch": 6.410256410256411,
      "eval_accuracy": 0.987684020679171,
      "eval_f1": 0.6210714441393139,
      "eval_loss": 0.03721313551068306,
      "eval_precision": 0.7209796579293594,
      "eval_recall": 0.5454823889739663,
      "eval_runtime": 14.4932,
      "eval_samples_per_second": 559.778,
      "eval_steps_per_second": 2.208,
      "step": 13000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 293677205951196.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}