rental-text-llama_3/checkpoint-360/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.981366459627329,
"eval_steps": 500,
"global_step": 360,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2070393374741201,
"grad_norm": 0.6150813102722168,
"learning_rate": 0.0001388888888888889,
"loss": 1.2294,
"step": 25
},
{
"epoch": 0.4140786749482402,
"grad_norm": 0.6390131115913391,
"learning_rate": 0.00019135802469135804,
"loss": 0.7522,
"step": 50
},
{
"epoch": 0.6211180124223602,
"grad_norm": 1.1554582118988037,
"learning_rate": 0.00017592592592592595,
"loss": 0.5117,
"step": 75
},
{
"epoch": 0.8281573498964804,
"grad_norm": 0.4650109112262726,
"learning_rate": 0.00016049382716049385,
"loss": 0.4801,
"step": 100
},
{
"epoch": 1.0351966873706004,
"grad_norm": 0.4381057024002075,
"learning_rate": 0.00014506172839506173,
"loss": 0.4299,
"step": 125
},
{
"epoch": 1.2422360248447206,
"grad_norm": 0.5832701921463013,
"learning_rate": 0.00012962962962962963,
"loss": 0.4302,
"step": 150
},
{
"epoch": 1.4492753623188406,
"grad_norm": 0.5579822659492493,
"learning_rate": 0.00011419753086419754,
"loss": 0.3808,
"step": 175
},
{
"epoch": 1.6563146997929605,
"grad_norm": 0.4348982274532318,
"learning_rate": 9.876543209876543e-05,
"loss": 0.3839,
"step": 200
},
{
"epoch": 1.8633540372670807,
"grad_norm": 0.661745548248291,
"learning_rate": 8.333333333333334e-05,
"loss": 0.4496,
"step": 225
},
{
"epoch": 2.070393374741201,
"grad_norm": 0.47886422276496887,
"learning_rate": 6.790123456790123e-05,
"loss": 0.4054,
"step": 250
},
{
"epoch": 2.277432712215321,
"grad_norm": 0.5070476531982422,
"learning_rate": 5.246913580246914e-05,
"loss": 0.4053,
"step": 275
},
{
"epoch": 2.4844720496894412,
"grad_norm": 0.40826812386512756,
"learning_rate": 3.7037037037037037e-05,
"loss": 0.3673,
"step": 300
},
{
"epoch": 2.691511387163561,
"grad_norm": 0.6191437840461731,
"learning_rate": 2.1604938271604937e-05,
"loss": 0.3548,
"step": 325
},
{
"epoch": 2.898550724637681,
"grad_norm": 0.7395160794258118,
"learning_rate": 6.172839506172839e-06,
"loss": 0.3762,
"step": 350
}
],
"logging_steps": 25,
"max_steps": 360,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 6.645907168690176e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
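
For reference, a minimal sketch of how this trainer state can be inspected outside the Trainer, assuming the checkpoint layout above; the local path is illustrative and may need adjusting.

import json

# A minimal sketch (not part of the checkpoint itself): load trainer_state.json
# and print the training progress plus the logged loss / learning-rate history.
# The path below is an assumption; point it at wherever the checkpoint lives.
with open("checkpoint-360/trainer_state.json") as f:
    state = json.load(f)

print(f"trained for {state['epoch']:.2f} epochs "
      f"({state['global_step']}/{state['max_steps']} steps)")

# Each log_history entry was written every `logging_steps` (25) steps.
for entry in state["log_history"]:
    print(f"step {entry['step']:>3}  "
          f"epoch {entry['epoch']:.2f}  "
          f"loss {entry['loss']:.4f}  "
          f"lr {entry['learning_rate']:.2e}")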