|
{
  "best_metric": 0.037236765027046204,
  "best_model_checkpoint": "doc-topic-model_eval-02_train-00/checkpoint-14000",
  "epoch": 9.368836291913215,
  "eval_steps": 1000,
  "global_step": 19000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.32839319109916687,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.1668,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.4088590443134308,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0929,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.9813654208653996,
      "eval_f1": 0.0,
      "eval_loss": 0.09118108451366425,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.1097,
      "eval_samples_per_second": 669.709,
      "eval_steps_per_second": 2.642,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.33391374349594116,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.0874,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.3162483870983124,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0785,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.9813654208653996,
      "eval_f1": 0.0,
      "eval_loss": 0.07077465206384659,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.1773,
      "eval_samples_per_second": 665.993,
      "eval_steps_per_second": 2.628,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.31390729546546936,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.0684,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.40165144205093384,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0622,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.9822994174922404,
      "eval_f1": 0.11091336228376165,
      "eval_loss": 0.05758311599493027,
      "eval_precision": 0.8665183537263627,
      "eval_recall": 0.059248554913294796,
      "eval_runtime": 14.3679,
      "eval_samples_per_second": 564.454,
      "eval_steps_per_second": 2.227,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.4135937988758087,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.0584,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.324859082698822,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.0542,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.9842311889677849,
      "eval_f1": 0.3405642484589853,
      "eval_loss": 0.05005618929862976,
      "eval_precision": 0.7714822771213748,
      "eval_recall": 0.21851232126559172,
      "eval_runtime": 14.3627,
      "eval_samples_per_second": 564.655,
      "eval_steps_per_second": 2.228,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.3279891312122345,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0488,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.4424910843372345,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.048,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9852799863939793,
      "eval_f1": 0.42770553229005953,
      "eval_loss": 0.046073004603385925,
      "eval_precision": 0.7762,
      "eval_recall": 0.29517797383632494,
      "eval_runtime": 14.498,
      "eval_samples_per_second": 559.388,
      "eval_steps_per_second": 2.207,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.3125520646572113,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0453,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.4686921536922455,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.0436,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.9861473702113185,
      "eval_f1": 0.5112022404480896,
      "eval_loss": 0.04339677840471268,
      "eval_precision": 0.7463492990654206,
      "eval_recall": 0.38872832369942195,
      "eval_runtime": 14.5997,
      "eval_samples_per_second": 555.492,
      "eval_steps_per_second": 2.192,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.3562796711921692,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.0412,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.24857360124588013,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.0384,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9866873591564267,
      "eval_f1": 0.5495612142137822,
      "eval_loss": 0.0414077453315258,
      "eval_precision": 0.7436729396495781,
      "eval_recall": 0.4358077274110131,
      "eval_runtime": 14.527,
      "eval_samples_per_second": 558.269,
      "eval_steps_per_second": 2.203,
      "step": 7000
    },
    {
      "epoch": 3.698224852071006,
      "grad_norm": 0.47940972447395325,
      "learning_rate": 1.92603550295858e-05,
      "loss": 0.0394,
      "step": 7500
    },
    {
      "epoch": 3.9447731755424065,
      "grad_norm": 0.39106041193008423,
      "learning_rate": 1.921104536489152e-05,
      "loss": 0.0385,
      "step": 8000
    },
    {
      "epoch": 3.9447731755424065,
      "eval_accuracy": 0.9866717689244158,
      "eval_f1": 0.5362919132149901,
      "eval_loss": 0.040173791348934174,
      "eval_precision": 0.7624789680314077,
      "eval_recall": 0.41359902646790386,
      "eval_runtime": 14.5683,
      "eval_samples_per_second": 556.687,
      "eval_steps_per_second": 2.197,
      "step": 8000
    },
    {
      "epoch": 4.191321499013807,
      "grad_norm": 0.4470931887626648,
      "learning_rate": 1.916173570019724e-05,
      "loss": 0.0342,
      "step": 8500
    },
    {
      "epoch": 4.437869822485207,
      "grad_norm": 0.33709824085235596,
      "learning_rate": 1.911242603550296e-05,
      "loss": 0.0343,
      "step": 9000
    },
    {
      "epoch": 4.437869822485207,
      "eval_accuracy": 0.9869977465028275,
      "eval_f1": 0.5633092155369384,
      "eval_loss": 0.03959447145462036,
      "eval_precision": 0.7527989821882952,
      "eval_recall": 0.45003042287800427,
      "eval_runtime": 14.5424,
      "eval_samples_per_second": 557.678,
      "eval_steps_per_second": 2.2,
      "step": 9000
    },
    {
      "epoch": 4.684418145956608,
      "grad_norm": 0.3310143053531647,
      "learning_rate": 1.906311637080868e-05,
      "loss": 0.0343,
      "step": 9500
    },
    {
      "epoch": 4.930966469428008,
      "grad_norm": 0.5156289935112,
      "learning_rate": 1.90138067061144e-05,
      "loss": 0.0343,
      "step": 10000
    },
    {
      "epoch": 4.930966469428008,
      "eval_accuracy": 0.9872230962200774,
      "eval_f1": 0.57721708952774,
      "eval_loss": 0.03884171321988106,
      "eval_precision": 0.752782874617737,
      "eval_recall": 0.46805597809552785,
      "eval_runtime": 14.5796,
      "eval_samples_per_second": 556.255,
      "eval_steps_per_second": 2.195,
      "step": 10000
    },
    {
      "epoch": 5.177514792899408,
      "grad_norm": 0.3030126988887787,
      "learning_rate": 1.896459566074951e-05,
      "loss": 0.0314,
      "step": 10500
    },
    {
      "epoch": 5.424063116370808,
      "grad_norm": 0.49180835485458374,
      "learning_rate": 1.891528599605523e-05,
      "loss": 0.0304,
      "step": 11000
    },
    {
      "epoch": 5.424063116370808,
      "eval_accuracy": 0.9870983743639894,
      "eval_f1": 0.5816059199338144,
      "eval_loss": 0.038809314370155334,
      "eval_precision": 0.7349285631316065,
      "eval_recall": 0.48121387283236994,
      "eval_runtime": 14.3105,
      "eval_samples_per_second": 566.716,
      "eval_steps_per_second": 2.236,
      "step": 11000
    },
    {
      "epoch": 5.670611439842209,
      "grad_norm": 0.4685238003730774,
      "learning_rate": 1.886597633136095e-05,
      "loss": 0.0297,
      "step": 11500
    },
    {
      "epoch": 5.9171597633136095,
      "grad_norm": 0.46912193298339844,
      "learning_rate": 1.881666666666667e-05,
      "loss": 0.0299,
      "step": 12000
    },
    {
      "epoch": 5.9171597633136095,
      "eval_accuracy": 0.987516476040648,
      "eval_f1": 0.6071364852809991,
      "eval_loss": 0.03742339462041855,
      "eval_precision": 0.7340379637618637,
      "eval_recall": 0.5176452692424703,
      "eval_runtime": 14.2866,
      "eval_samples_per_second": 567.665,
      "eval_steps_per_second": 2.24,
      "step": 12000
    },
    {
      "epoch": 6.16370808678501,
      "grad_norm": 0.3353117108345032,
      "learning_rate": 1.876735700197239e-05,
      "loss": 0.0276,
      "step": 12500
    },
    {
      "epoch": 6.410256410256411,
      "grad_norm": 0.2552751898765564,
      "learning_rate": 1.8718145956607497e-05,
      "loss": 0.0265,
      "step": 13000
    },
    {
      "epoch": 6.410256410256411,
      "eval_accuracy": 0.9874682880507958,
      "eval_f1": 0.6134813778632628,
      "eval_loss": 0.03768361359834671,
      "eval_precision": 0.7213199013157895,
      "eval_recall": 0.533693337389717,
      "eval_runtime": 14.3396,
      "eval_samples_per_second": 565.567,
      "eval_steps_per_second": 2.232,
      "step": 13000
    },
    {
      "epoch": 6.65680473372781,
      "grad_norm": 0.2560911774635315,
      "learning_rate": 1.8668836291913217e-05,
      "loss": 0.0273,
      "step": 13500
    },
    {
      "epoch": 6.903353057199211,
      "grad_norm": 0.4542127549648285,
      "learning_rate": 1.8619526627218937e-05,
      "loss": 0.0261,
      "step": 14000
    },
    {
      "epoch": 6.903353057199211,
      "eval_accuracy": 0.9876454497781935,
      "eval_f1": 0.6116630284670558,
      "eval_loss": 0.037236765027046204,
      "eval_precision": 0.7382514248843962,
      "eval_recall": 0.5221326437480985,
      "eval_runtime": 14.3666,
      "eval_samples_per_second": 564.504,
      "eval_steps_per_second": 2.227,
      "step": 14000
    },
    {
      "epoch": 7.149901380670611,
      "grad_norm": 0.2716215252876282,
      "learning_rate": 1.8570216962524657e-05,
      "loss": 0.025,
      "step": 14500
    },
    {
      "epoch": 7.396449704142012,
      "grad_norm": 0.2959195077419281,
      "learning_rate": 1.8520907297830377e-05,
      "loss": 0.0236,
      "step": 15000
    },
    {
      "epoch": 7.396449704142012,
      "eval_accuracy": 0.9876511189534702,
      "eval_f1": 0.6207286814956688,
      "eval_loss": 0.037736888974905014,
      "eval_precision": 0.7256997455470738,
      "eval_recall": 0.5422878004259203,
      "eval_runtime": 14.6872,
      "eval_samples_per_second": 552.183,
      "eval_steps_per_second": 2.179,
      "step": 15000
    },
    {
      "epoch": 7.642998027613412,
      "grad_norm": 0.44261014461517334,
      "learning_rate": 1.8471696252465485e-05,
      "loss": 0.0225,
      "step": 15500
    },
    {
      "epoch": 7.889546351084813,
      "grad_norm": 0.721012532711029,
      "learning_rate": 1.8422386587771205e-05,
      "loss": 0.0236,
      "step": 16000
    },
    {
      "epoch": 7.889546351084813,
      "eval_accuracy": 0.9878353671499639,
      "eval_f1": 0.6227750186788555,
      "eval_loss": 0.03772629797458649,
      "eval_precision": 0.7376366475793857,
      "eval_recall": 0.5388652266504411,
      "eval_runtime": 14.358,
      "eval_samples_per_second": 564.84,
      "eval_steps_per_second": 2.229,
      "step": 16000
    },
    {
      "epoch": 8.136094674556213,
      "grad_norm": 0.2590983510017395,
      "learning_rate": 1.8373076923076926e-05,
      "loss": 0.0219,
      "step": 16500
    },
    {
      "epoch": 8.382642998027613,
      "grad_norm": 0.6246200203895569,
      "learning_rate": 1.8323767258382646e-05,
      "loss": 0.0215,
      "step": 17000
    },
    {
      "epoch": 8.382642998027613,
      "eval_accuracy": 0.9878552092634324,
      "eval_f1": 0.6335685268334402,
      "eval_loss": 0.03787342831492424,
      "eval_precision": 0.7236495066914135,
      "eval_recall": 0.5634317006388805,
      "eval_runtime": 14.4229,
      "eval_samples_per_second": 562.299,
      "eval_steps_per_second": 2.219,
      "step": 17000
    },
    {
      "epoch": 8.629191321499015,
      "grad_norm": 0.46473428606987,
      "learning_rate": 1.8274457593688362e-05,
      "loss": 0.0204,
      "step": 17500
    },
    {
      "epoch": 8.875739644970414,
      "grad_norm": 0.5553109049797058,
      "learning_rate": 1.822524654832347e-05,
      "loss": 0.0216,
      "step": 18000
    },
    {
      "epoch": 8.875739644970414,
      "eval_accuracy": 0.987812690448857,
      "eval_f1": 0.6330075540950023,
      "eval_loss": 0.03817650303244591,
      "eval_precision": 0.7211903141106681,
      "eval_recall": 0.5640401581989656,
      "eval_runtime": 14.4328,
      "eval_samples_per_second": 561.914,
      "eval_steps_per_second": 2.217,
      "step": 18000
    },
    {
      "epoch": 9.122287968441814,
      "grad_norm": 0.3819935917854309,
      "learning_rate": 1.8175936883629194e-05,
      "loss": 0.0194,
      "step": 18500
    },
    {
      "epoch": 9.368836291913215,
      "grad_norm": 0.5073143243789673,
      "learning_rate": 1.8126627218934914e-05,
      "loss": 0.0177,
      "step": 19000
    },
    {
      "epoch": 9.368836291913215,
      "eval_accuracy": 0.9878807205521777,
      "eval_f1": 0.6370389235536313,
      "eval_loss": 0.038153521716594696,
      "eval_precision": 0.7207761021995965,
      "eval_recall": 0.5707331913599026,
      "eval_runtime": 14.4357,
      "eval_samples_per_second": 561.802,
      "eval_steps_per_second": 2.217,
      "step": 19000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 430075849523328.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}