mistral-7b-sft-constitutional-ai / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9965714285714286,
  "eval_steps": 500,
  "global_step": 109,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009142857142857144,
      "grad_norm": 2.635891914367676,
      "learning_rate": 1.8181818181818183e-06,
      "loss": 9.7005,
      "step": 1
    },
    {
      "epoch": 0.045714285714285714,
      "grad_norm": 2.669848680496216,
      "learning_rate": 9.090909090909091e-06,
      "loss": 9.3658,
      "step": 5
    },
    {
      "epoch": 0.09142857142857143,
      "grad_norm": 2.1069536209106445,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 9.1556,
      "step": 10
    },
    {
      "epoch": 0.13714285714285715,
      "grad_norm": 1.8738576173782349,
      "learning_rate": 1.991790013823246e-05,
      "loss": 8.9444,
      "step": 15
    },
    {
      "epoch": 0.18285714285714286,
      "grad_norm": 1.5760283470153809,
      "learning_rate": 1.9586678530366607e-05,
      "loss": 8.5402,
      "step": 20
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 1.3902924060821533,
      "learning_rate": 1.900968867902419e-05,
      "loss": 8.4052,
      "step": 25
    },
    {
      "epoch": 0.2742857142857143,
      "grad_norm": 1.3036909103393555,
      "learning_rate": 1.820172254596956e-05,
      "loss": 8.5012,
      "step": 30
    },
    {
      "epoch": 0.32,
      "grad_norm": 1.4191102981567383,
      "learning_rate": 1.7183493500977277e-05,
      "loss": 8.3836,
      "step": 35
    },
    {
      "epoch": 0.3657142857142857,
      "grad_norm": 1.3017487525939941,
      "learning_rate": 1.598110530491216e-05,
      "loss": 8.376,
      "step": 40
    },
    {
      "epoch": 0.4114285714285714,
      "grad_norm": 0.9578719139099121,
      "learning_rate": 1.4625382902408356e-05,
      "loss": 8.346,
      "step": 45
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 0.884999692440033,
      "learning_rate": 1.315108218023621e-05,
      "loss": 8.1656,
      "step": 50
    },
    {
      "epoch": 0.5028571428571429,
      "grad_norm": 0.8994832634925842,
      "learning_rate": 1.1595998950333794e-05,
      "loss": 8.2717,
      "step": 55
    },
    {
      "epoch": 0.5485714285714286,
      "grad_norm": 0.8586516976356506,
      "learning_rate": 1e-05,
      "loss": 8.2746,
      "step": 60
    },
    {
      "epoch": 0.5942857142857143,
      "grad_norm": 0.8844101428985596,
      "learning_rate": 8.404001049666211e-06,
      "loss": 8.1344,
      "step": 65
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.857933521270752,
      "learning_rate": 6.848917819763794e-06,
      "loss": 8.0928,
      "step": 70
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 0.8832647204399109,
      "learning_rate": 5.37461709759165e-06,
      "loss": 8.2155,
      "step": 75
    },
    {
      "epoch": 0.7314285714285714,
      "grad_norm": 0.8684210777282715,
      "learning_rate": 4.01889469508784e-06,
      "loss": 8.1696,
      "step": 80
    },
    {
      "epoch": 0.7771428571428571,
      "grad_norm": 0.8870261311531067,
      "learning_rate": 2.8165064990227255e-06,
      "loss": 8.0587,
      "step": 85
    },
    {
      "epoch": 0.8228571428571428,
      "grad_norm": 0.8674476146697998,
      "learning_rate": 1.7982774540304404e-06,
      "loss": 8.2368,
      "step": 90
    },
    {
      "epoch": 0.8685714285714285,
      "grad_norm": 0.8507274985313416,
      "learning_rate": 9.903113209758098e-07,
      "loss": 8.081,
      "step": 95
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 0.8538458347320557,
      "learning_rate": 4.133214696333943e-07,
      "loss": 8.2492,
      "step": 100
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.8556557893753052,
      "learning_rate": 8.209986176753947e-08,
      "loss": 8.2279,
      "step": 105
    },
    {
      "epoch": 0.9965714285714286,
      "eval_loss": 1.0207911729812622,
      "eval_runtime": 10395.2442,
      "eval_samples_per_second": 1.484,
      "eval_steps_per_second": 0.186,
      "step": 109
    },
    {
      "epoch": 0.9965714285714286,
      "step": 109,
      "total_flos": 6.131289443249684e+17,
      "train_loss": 8.393078707773752,
      "train_runtime": 26246.6491,
      "train_samples_per_second": 0.267,
      "train_steps_per_second": 0.004
    }
  ],
  "logging_steps": 5,
  "max_steps": 109,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.131289443249684e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
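
This trainer_state.json follows the standard layout written by the Hugging Face transformers Trainer: "log_history" mixes the per-step training logs, the evaluation record, and the final training summary, distinguishable by which keys each entry carries. Below is a minimal sketch, assuming the file has been downloaded locally as trainer_state.json, of how one might parse it and print the training curve; the filename and the filtering logic are illustrative, not part of the original file.

```python
import json

# A minimal sketch, assuming trainer_state.json has been downloaded locally.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries are the ones that carry a "loss" key; the eval record uses
# "eval_loss" and the final summary uses "train_loss", so both are excluded here.
train_logs = [e for e in state["log_history"] if "loss" in e and "step" in e]

for entry in train_logs:
    print(f"step {entry['step']:>3}  lr={entry['learning_rate']:.2e}  "
          f"loss={entry['loss']:.4f}  grad_norm={entry['grad_norm']:.3f}")

# The evaluation record is the single entry with an "eval_loss" key.
eval_log = next(e for e in state["log_history"] if "eval_loss" in e)
print("eval_loss:", eval_log["eval_loss"])
```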