{
  "best_metric": 0.038305457681417465,
  "best_model_checkpoint": "doc-topic-model_eval-04_train-00/checkpoint-10000",
  "epoch": 4.930966469428008,
  "eval_steps": 1000,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2465483234714004,
      "grad_norm": 0.32839319109916687,
      "learning_rate": 1.9950690335305722e-05,
      "loss": 0.1668,
      "step": 500
    },
    {
      "epoch": 0.4930966469428008,
      "grad_norm": 0.4088590443134308,
      "learning_rate": 1.9901380670611442e-05,
      "loss": 0.0929,
      "step": 1000
    },
    {
      "epoch": 0.4930966469428008,
      "eval_accuracy": 0.981496987239155,
      "eval_f1": 0.0,
      "eval_loss": 0.09063737094402313,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.1099,
      "eval_samples_per_second": 669.945,
      "eval_steps_per_second": 2.642,
      "step": 1000
    },
    {
      "epoch": 0.7396449704142012,
      "grad_norm": 0.33391374349594116,
      "learning_rate": 1.9852071005917162e-05,
      "loss": 0.0874,
      "step": 1500
    },
    {
      "epoch": 0.9861932938856016,
      "grad_norm": 0.3162483870983124,
      "learning_rate": 1.980276134122288e-05,
      "loss": 0.0785,
      "step": 2000
    },
    {
      "epoch": 0.9861932938856016,
      "eval_accuracy": 0.981496987239155,
      "eval_f1": 0.0,
      "eval_loss": 0.0703761950135231,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 12.2009,
      "eval_samples_per_second": 664.948,
      "eval_steps_per_second": 2.623,
      "step": 2000
    },
    {
      "epoch": 1.232741617357002,
      "grad_norm": 0.31390729546546936,
      "learning_rate": 1.9753451676528602e-05,
      "loss": 0.0684,
      "step": 2500
    },
    {
      "epoch": 1.4792899408284024,
      "grad_norm": 0.40165144205093384,
      "learning_rate": 1.9704142011834322e-05,
      "loss": 0.0622,
      "step": 3000
    },
    {
      "epoch": 1.4792899408284024,
      "eval_accuracy": 0.982317296916684,
      "eval_f1": 0.10215092439392849,
      "eval_loss": 0.057249072939157486,
      "eval_precision": 0.8442330558858502,
      "eval_recall": 0.05436447166921899,
      "eval_runtime": 14.4254,
      "eval_samples_per_second": 562.413,
      "eval_steps_per_second": 2.218,
      "step": 3000
    },
    {
      "epoch": 1.725838264299803,
      "grad_norm": 0.4135937988758087,
      "learning_rate": 1.965483234714004e-05,
      "loss": 0.0584,
      "step": 3500
    },
    {
      "epoch": 1.972386587771203,
      "grad_norm": 0.324859082698822,
      "learning_rate": 1.9605522682445763e-05,
      "loss": 0.0542,
      "step": 4000
    },
    {
      "epoch": 1.972386587771203,
      "eval_accuracy": 0.9842951074690683,
      "eval_f1": 0.338958793010913,
      "eval_loss": 0.04988710954785347,
      "eval_precision": 0.7662442706929091,
      "eval_recall": 0.21761102603369065,
      "eval_runtime": 14.4271,
      "eval_samples_per_second": 562.343,
      "eval_steps_per_second": 2.218,
      "step": 4000
    },
    {
      "epoch": 2.2189349112426036,
      "grad_norm": 0.3279891312122345,
      "learning_rate": 1.9556213017751483e-05,
      "loss": 0.0488,
      "step": 4500
    },
    {
      "epoch": 2.465483234714004,
      "grad_norm": 0.4424910843372345,
      "learning_rate": 1.95069033530572e-05,
      "loss": 0.048,
      "step": 5000
    },
    {
      "epoch": 2.465483234714004,
      "eval_accuracy": 0.9853463506136738,
      "eval_f1": 0.4277731673582296,
      "eval_loss": 0.045854322612285614,
      "eval_precision": 0.7708873379860419,
      "eval_recall": 0.2960183767228178,
      "eval_runtime": 14.537,
      "eval_samples_per_second": 558.095,
      "eval_steps_per_second": 2.201,
      "step": 5000
    },
    {
      "epoch": 2.712031558185404,
      "grad_norm": 0.3125520646572113,
      "learning_rate": 1.9457593688362923e-05,
      "loss": 0.0453,
      "step": 5500
    },
    {
      "epoch": 2.9585798816568047,
      "grad_norm": 0.4686921536922455,
      "learning_rate": 1.940828402366864e-05,
      "loss": 0.0436,
      "step": 6000
    },
    {
      "epoch": 2.9585798816568047,
      "eval_accuracy": 0.9862800018701361,
      "eval_f1": 0.5134646302250804,
      "eval_loss": 0.04293818399310112,
      "eval_precision": 0.7466393921683226,
      "eval_recall": 0.39127105666156203,
      "eval_runtime": 14.3533,
      "eval_samples_per_second": 565.238,
      "eval_steps_per_second": 2.229,
      "step": 6000
    },
    {
      "epoch": 3.2051282051282053,
      "grad_norm": 0.3562796711921692,
      "learning_rate": 1.935897435897436e-05,
      "loss": 0.0412,
      "step": 6500
    },
    {
      "epoch": 3.4516765285996054,
      "grad_norm": 0.24857360124588013,
      "learning_rate": 1.930966469428008e-05,
      "loss": 0.0384,
      "step": 7000
    },
    {
      "epoch": 3.4516765285996054,
      "eval_accuracy": 0.9867872054358622,
      "eval_f1": 0.551246270811279,
      "eval_loss": 0.041109804064035416,
      "eval_precision": 0.7417767417767418,
      "eval_recall": 0.43859111791730476,
      "eval_runtime": 14.583,
      "eval_samples_per_second": 556.331,
      "eval_steps_per_second": 2.194,
      "step": 7000
    },
    {
      "epoch": 3.698224852071006,
      "grad_norm": 0.47940972447395325,
      "learning_rate": 1.92603550295858e-05,
      "loss": 0.0394,
      "step": 7500
    },
    {
      "epoch": 3.9447731755424065,
      "grad_norm": 0.39106041193008423,
      "learning_rate": 1.921104536489152e-05,
      "loss": 0.0385,
      "step": 8000
    },
    {
      "epoch": 3.9447731755424065,
      "eval_accuracy": 0.9868410426858554,
      "eval_f1": 0.5391028185788012,
      "eval_loss": 0.0395812913775444,
      "eval_precision": 0.7659334461364918,
      "eval_recall": 0.4159264931087289,
      "eval_runtime": 14.388,
      "eval_samples_per_second": 563.873,
      "eval_steps_per_second": 2.224,
      "step": 8000
    },
    {
      "epoch": 4.191321499013807,
      "grad_norm": 0.4470931887626648,
      "learning_rate": 1.916173570019724e-05,
      "loss": 0.0342,
      "step": 8500
    },
    {
      "epoch": 4.437869822485207,
      "grad_norm": 0.33709824085235596,
      "learning_rate": 1.911242603550296e-05,
      "loss": 0.0343,
      "step": 9000
    },
    {
      "epoch": 4.437869822485207,
      "eval_accuracy": 0.9870167221332019,
      "eval_f1": 0.5622014141028091,
      "eval_loss": 0.039211541414260864,
      "eval_precision": 0.7474593495934959,
      "eval_recall": 0.45053598774885145,
      "eval_runtime": 14.475,
      "eval_samples_per_second": 560.482,
      "eval_steps_per_second": 2.211,
      "step": 9000
    },
    {
      "epoch": 4.684418145956608,
      "grad_norm": 0.3310143053531647,
      "learning_rate": 1.906311637080868e-05,
      "loss": 0.0343,
      "step": 9500
    },
    {
      "epoch": 4.930966469428008,
      "grad_norm": 0.5156289935112,
      "learning_rate": 1.90138067061144e-05,
      "loss": 0.0343,
      "step": 10000
    },
    {
      "epoch": 4.930966469428008,
      "eval_accuracy": 0.9872320711331749,
      "eval_f1": 0.5746649046630168,
      "eval_loss": 0.038305457681417465,
      "eval_precision": 0.7490157480314961,
      "eval_recall": 0.466156202143951,
      "eval_runtime": 14.4319,
      "eval_samples_per_second": 562.156,
      "eval_steps_per_second": 2.217,
      "step": 10000
    }
  ],
  "logging_steps": 500,
  "max_steps": 202800,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 1000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 226383014056572.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}