{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.9991469396459802,
"eval_steps": 500,
"global_step": 3516,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.28435345134001566,
"grad_norm": 0.656255841255188,
"learning_rate": 9.294251565167901e-05,
"loss": 1.2378,
"step": 250
},
{
"epoch": 0.5687069026800313,
"grad_norm": 0.543454647064209,
"learning_rate": 8.582811610700058e-05,
"loss": 0.6375,
"step": 500
},
{
"epoch": 0.8530603540200469,
"grad_norm": 0.43407657742500305,
"learning_rate": 7.871371656232215e-05,
"loss": 0.5158,
"step": 750
},
{
"epoch": 1.1374138053600626,
"grad_norm": 0.4036562740802765,
"learning_rate": 7.159931701764372e-05,
"loss": 0.4885,
"step": 1000
},
{
"epoch": 1.421767256700078,
"grad_norm": 0.40085434913635254,
"learning_rate": 6.448491747296529e-05,
"loss": 0.4759,
"step": 1250
},
{
"epoch": 1.706120708040094,
"grad_norm": 0.4041031002998352,
"learning_rate": 5.737051792828686e-05,
"loss": 0.4629,
"step": 1500
},
{
"epoch": 1.9904741593801094,
"grad_norm": 0.4215051233768463,
"learning_rate": 5.025611838360843e-05,
"loss": 0.4555,
"step": 1750
},
{
"epoch": 2.2748276107201253,
"grad_norm": 0.4411364495754242,
"learning_rate": 4.3141718838929996e-05,
"loss": 0.4451,
"step": 2000
},
{
"epoch": 2.5591810620601407,
"grad_norm": 0.4689200818538666,
"learning_rate": 3.602731929425157e-05,
"loss": 0.433,
"step": 2250
},
{
"epoch": 2.843534513400156,
"grad_norm": 0.4492100775241852,
"learning_rate": 2.8912919749573137e-05,
"loss": 0.4217,
"step": 2500
},
{
"epoch": 3.127887964740172,
"grad_norm": 0.4875541627407074,
"learning_rate": 2.1798520204894708e-05,
"loss": 0.4116,
"step": 2750
},
{
"epoch": 3.4122414160801875,
"grad_norm": 0.48542124032974243,
"learning_rate": 1.4684120660216277e-05,
"loss": 0.4012,
"step": 3000
},
{
"epoch": 3.6965948674202034,
"grad_norm": 0.5120503306388855,
"learning_rate": 7.569721115537849e-06,
"loss": 0.3939,
"step": 3250
},
{
"epoch": 3.980948318760219,
"grad_norm": 0.510686993598938,
"learning_rate": 4.553215708594195e-07,
"loss": 0.3892,
"step": 3500
},
{
"epoch": 3.9991469396459802,
"step": 3516,
"total_flos": 9.473878732962447e+17,
"train_loss": 0.5115500152856959,
"train_runtime": 22624.5633,
"train_samples_per_second": 39.791,
"train_steps_per_second": 0.155
}
],
"logging_steps": 250,
"max_steps": 3516,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.473878732962447e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}