|
{
  "best_metric": 0.03731473535299301,
  "best_model_checkpoint": "doc-topic-model_eval-01_train-04/checkpoint-13000",
  "epoch": 6.407097092163627,
  "eval_steps": 1000,
  "global_step": 13000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2464268112370626,
      "grad_norm": 0.36145079135894775,
      "learning_rate": 1.9950714637752587e-05,
      "loss": 0.1655,
      "step": 500
    },
    {
      "epoch": 0.4928536224741252,
      "grad_norm": 0.3764462471008301,
      "learning_rate": 1.990142927550518e-05,
      "loss": 0.0934,
      "step": 1000
    },
    {
      "epoch": 0.4928536224741252,
      "eval_accuracy": 0.981383845685049,
      "eval_f1": 0.0,
      "eval_loss": 0.09022163599729538,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.1169,
      "eval_samples_per_second": 669.315,
      "eval_steps_per_second": 2.641,
      "step": 1000
    },
    {
      "epoch": 0.7392804337111878,
      "grad_norm": 0.3353608548641205,
      "learning_rate": 1.9852143913257764e-05,
      "loss": 0.0875,
      "step": 1500
    },
    {
      "epoch": 0.9857072449482503,
      "grad_norm": 0.3118976652622223,
      "learning_rate": 1.980285855101035e-05,
      "loss": 0.0778,
      "step": 2000
    },
    {
      "epoch": 0.9857072449482503,
      "eval_accuracy": 0.981383845685049,
      "eval_f1": 0.0,
      "eval_loss": 0.07012662291526794,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.1219,
      "eval_samples_per_second": 669.036,
      "eval_steps_per_second": 2.64,
      "step": 2000
    },
    {
      "epoch": 1.232134056185313,
      "grad_norm": 0.36931222677230835,
      "learning_rate": 1.975357318876294e-05,
      "loss": 0.0682,
      "step": 2500
    },
    {
      "epoch": 1.4785608674223756,
      "grad_norm": 0.3769352436065674,
      "learning_rate": 1.9704287826515527e-05,
      "loss": 0.0618,
      "step": 3000
    },
    {
      "epoch": 1.4785608674223756,
      "eval_accuracy": 0.9828110605609649,
      "eval_f1": 0.1748537215947748,
      "eval_loss": 0.05648113414645195,
      "eval_precision": 0.8221369161868202,
      "eval_recall": 0.09783022459078797,
      "eval_runtime": 14.4938,
      "eval_samples_per_second": 559.549,
      "eval_steps_per_second": 2.208,
      "step": 3000
    },
    {
      "epoch": 1.724987678659438,
      "grad_norm": 0.3530617356300354,
      "learning_rate": 1.9655002464268113e-05,
      "loss": 0.0565,
      "step": 3500
    },
    {
      "epoch": 1.9714144898965007,
      "grad_norm": 0.3648615777492523,
      "learning_rate": 1.96057171020207e-05,
      "loss": 0.0535,
      "step": 4000
    },
    {
      "epoch": 1.9714144898965007,
      "eval_accuracy": 0.9842326062616041,
      "eval_f1": 0.33010176431625216,
      "eval_loss": 0.04882097616791725,
      "eval_precision": 0.7894585253456221,
      "eval_recall": 0.2086791016368481,
      "eval_runtime": 14.3902,
      "eval_samples_per_second": 563.579,
      "eval_steps_per_second": 2.224,
      "step": 4000
    },
    {
      "epoch": 2.2178413011335634,
      "grad_norm": 0.4629921317100525,
      "learning_rate": 1.955643173977329e-05,
      "loss": 0.0479,
      "step": 4500
    },
    {
      "epoch": 2.464268112370626,
      "grad_norm": 0.38679325580596924,
      "learning_rate": 1.9507146377525875e-05,
      "loss": 0.0473,
      "step": 5000
    },
    {
      "epoch": 2.464268112370626,
      "eval_accuracy": 0.985597460209476,
      "eval_f1": 0.4667856018469934,
      "eval_loss": 0.04518100246787071,
      "eval_precision": 0.7509707918284653,
      "eval_recall": 0.33863722877807384,
      "eval_runtime": 14.416,
      "eval_samples_per_second": 562.568,
      "eval_steps_per_second": 2.22,
      "step": 5000
    },
    {
      "epoch": 2.7106949236076883,
      "grad_norm": 0.35379377007484436,
      "learning_rate": 1.9457861015278464e-05,
      "loss": 0.0442,
      "step": 5500
    },
    {
      "epoch": 2.957121734844751,
      "grad_norm": 0.3872750997543335,
      "learning_rate": 1.9408575653031053e-05,
      "loss": 0.0436,
      "step": 6000
    },
    {
      "epoch": 2.957121734844751,
      "eval_accuracy": 0.985956035545729,
      "eval_f1": 0.49631474609871395,
      "eval_loss": 0.042364452034235,
      "eval_precision": 0.7467115325787703,
      "eval_recall": 0.3716787209744956,
      "eval_runtime": 14.5806,
      "eval_samples_per_second": 556.218,
      "eval_steps_per_second": 2.195,
      "step": 6000
    },
    {
      "epoch": 3.2035485460818136,
      "grad_norm": 0.4312683939933777,
      "learning_rate": 1.9359290290783638e-05,
      "loss": 0.0399,
      "step": 6500
    },
    {
      "epoch": 3.449975357318876,
      "grad_norm": 0.46780943870544434,
      "learning_rate": 1.9310004928536227e-05,
      "loss": 0.0389,
      "step": 7000
    },
    {
      "epoch": 3.449975357318876,
      "eval_accuracy": 0.9865116147228482,
      "eval_f1": 0.5325868081135504,
      "eval_loss": 0.04031926766037941,
      "eval_precision": 0.7503459728757266,
      "eval_recall": 0.41279025504377614,
      "eval_runtime": 14.5279,
      "eval_samples_per_second": 558.236,
      "eval_steps_per_second": 2.203,
      "step": 7000
    },
    {
      "epoch": 3.696402168555939,
      "grad_norm": 0.3296278417110443,
      "learning_rate": 1.9260719566288815e-05,
      "loss": 0.0379,
      "step": 7500
    },
    {
      "epoch": 3.9428289797930014,
      "grad_norm": 0.22207264602184296,
      "learning_rate": 1.92114342040414e-05,
      "loss": 0.0376,
      "step": 8000
    },
    {
      "epoch": 3.9428289797930014,
      "eval_accuracy": 0.9864903553155605,
      "eval_f1": 0.5587037037037037,
      "eval_loss": 0.03964832052588463,
      "eval_precision": 0.712817483756645,
      "eval_recall": 0.4593833269889608,
      "eval_runtime": 14.4135,
      "eval_samples_per_second": 562.668,
      "eval_steps_per_second": 2.22,
      "step": 8000
    },
    {
      "epoch": 4.189255791030064,
      "grad_norm": 0.22761479020118713,
      "learning_rate": 1.916214884179399e-05,
      "loss": 0.034,
      "step": 8500
    },
    {
      "epoch": 4.435682602267127,
      "grad_norm": 0.4061233699321747,
      "learning_rate": 1.9112863479546578e-05,
      "loss": 0.0339,
      "step": 9000
    },
    {
      "epoch": 4.435682602267127,
      "eval_accuracy": 0.9867426336153748,
      "eval_f1": 0.5582735171892709,
      "eval_loss": 0.03883419558405876,
      "eval_precision": 0.735107573684865,
      "eval_recall": 0.45001903311762464,
      "eval_runtime": 14.3737,
      "eval_samples_per_second": 564.226,
      "eval_steps_per_second": 2.226,
      "step": 9000
    },
    {
      "epoch": 4.68210941350419,
      "grad_norm": 0.31547975540161133,
      "learning_rate": 1.9063578117299164e-05,
      "loss": 0.0328,
      "step": 9500
    },
    {
      "epoch": 4.928536224741252,
      "grad_norm": 0.307452529668808,
      "learning_rate": 1.9014292755051752e-05,
      "loss": 0.0337,
      "step": 10000
    },
    {
      "epoch": 4.928536224741252,
      "eval_accuracy": 0.9871139645960004,
      "eval_f1": 0.5737059264816204,
      "eval_loss": 0.038497649133205414,
      "eval_precision": 0.7467350176980349,
      "eval_recall": 0.46577845451084887,
      "eval_runtime": 14.3777,
      "eval_samples_per_second": 564.067,
      "eval_steps_per_second": 2.226,
      "step": 10000
    },
    {
      "epoch": 5.1749630359783145,
      "grad_norm": 0.3740977942943573,
      "learning_rate": 1.8965007392804338e-05,
      "loss": 0.0303,
      "step": 10500
    },
    {
      "epoch": 5.421389847215377,
      "grad_norm": 0.37522149085998535,
      "learning_rate": 1.891582060128142e-05,
      "loss": 0.0295,
      "step": 11000
    },
    {
      "epoch": 5.421389847215377,
      "eval_accuracy": 0.9871380585909265,
      "eval_f1": 0.6012916831422169,
      "eval_loss": 0.037736039608716965,
      "eval_precision": 0.7108871805526699,
      "eval_recall": 0.520974495622383,
      "eval_runtime": 14.3748,
      "eval_samples_per_second": 564.184,
      "eval_steps_per_second": 2.226,
      "step": 11000
    },
    {
      "epoch": 5.667816658452439,
      "grad_norm": 0.3089052736759186,
      "learning_rate": 1.886653523903401e-05,
      "loss": 0.029,
      "step": 11500
    },
    {
      "epoch": 5.914243469689502,
      "grad_norm": 0.3211393356323242,
      "learning_rate": 1.8817249876786596e-05,
      "loss": 0.0305,
      "step": 12000
    },
    {
      "epoch": 5.914243469689502,
      "eval_accuracy": 0.9871366412971073,
      "eval_f1": 0.5951106352605282,
      "eval_loss": 0.03832274675369263,
      "eval_precision": 0.7186725568365477,
      "eval_recall": 0.5078035782261134,
      "eval_runtime": 14.5037,
      "eval_samples_per_second": 559.166,
      "eval_steps_per_second": 2.206,
      "step": 12000
    },
    {
      "epoch": 6.160670280926565,
      "grad_norm": 0.16942404210567474,
      "learning_rate": 1.8767964514539182e-05,
      "loss": 0.0275,
      "step": 12500
    },
    {
      "epoch": 6.407097092163627,
      "grad_norm": 0.3766990303993225,
      "learning_rate": 1.8718777723016266e-05,
      "loss": 0.0254,
      "step": 13000
    },
    {
      "epoch": 6.407097092163627,
      "eval_accuracy": 0.9874257692362204,
      "eval_f1": 0.6115246518959628,
      "eval_loss": 0.03731473535299301,
      "eval_precision": 0.7196743275275688,
      "eval_recall": 0.5316330414921964,
      "eval_runtime": 14.5546,
      "eval_samples_per_second": 557.213,
      "eval_steps_per_second": 2.199,
      "step": 13000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202900,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 294847422552900.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}