{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6699507389162561,
"eval_steps": 500,
"global_step": 136,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1.9998802517966852e-05,
"loss": 1.1243,
"step": 1
},
{
"epoch": 0.02,
"learning_rate": 1.997007728639956e-05,
"loss": 1.122,
"step": 5
},
{
"epoch": 0.05,
"learning_rate": 1.9880488219356086e-05,
"loss": 1.1164,
"step": 10
},
{
"epoch": 0.07,
"learning_rate": 1.973176894846855e-05,
"loss": 1.0903,
"step": 15
},
{
"epoch": 0.1,
"learning_rate": 1.9524809490566878e-05,
"loss": 1.0802,
"step": 20
},
{
"epoch": 0.12,
"learning_rate": 1.926084840336821e-05,
"loss": 1.0711,
"step": 25
},
{
"epoch": 0.15,
"learning_rate": 1.894146537327533e-05,
"loss": 1.0548,
"step": 30
},
{
"epoch": 0.17,
"learning_rate": 1.8568571761675893e-05,
"loss": 1.0363,
"step": 35
},
{
"epoch": 0.2,
"learning_rate": 1.814439916631857e-05,
"loss": 1.038,
"step": 40
},
{
"epoch": 0.22,
"learning_rate": 1.7671486066220965e-05,
"loss": 1.0281,
"step": 45
},
{
"epoch": 0.25,
"learning_rate": 1.7152662630033506e-05,
"loss": 1.0167,
"step": 50
},
{
"epoch": 0.27,
"learning_rate": 1.659103377877423e-05,
"loss": 1.0258,
"step": 55
},
{
"epoch": 0.3,
"learning_rate": 1.598996060429634e-05,
"loss": 1.0134,
"step": 60
},
{
"epoch": 0.32,
"learning_rate": 1.5353040254690396e-05,
"loss": 1.0172,
"step": 65
},
{
"epoch": 0.34,
"learning_rate": 1.4684084406997903e-05,
"loss": 1.0136,
"step": 70
},
{
"epoch": 0.37,
"learning_rate": 1.3987096456067236e-05,
"loss": 1.0109,
"step": 75
},
{
"epoch": 0.39,
"learning_rate": 1.3266247556066122e-05,
"loss": 0.9972,
"step": 80
},
{
"epoch": 0.42,
"learning_rate": 1.252585165803135e-05,
"loss": 1.0118,
"step": 85
},
{
"epoch": 0.44,
"learning_rate": 1.1770339692844484e-05,
"loss": 1.0013,
"step": 90
},
{
"epoch": 0.47,
"learning_rate": 1.1004233054136726e-05,
"loss": 1.0107,
"step": 95
},
{
"epoch": 0.49,
"learning_rate": 1.0232116539815558e-05,
"loss": 1.0039,
"step": 100
},
{
"epoch": 0.52,
"learning_rate": 9.458610914145826e-06,
"loss": 1.0032,
"step": 105
},
{
"epoch": 0.54,
"learning_rate": 8.688345254588579e-06,
"loss": 1.008,
"step": 110
},
{
"epoch": 0.57,
"learning_rate": 7.92592924888925e-06,
"loss": 1.0066,
"step": 115
},
{
"epoch": 0.59,
"learning_rate": 7.175925608204428e-06,
"loss": 1.006,
"step": 120
},
{
"epoch": 0.62,
"learning_rate": 6.442822761362015e-06,
"loss": 1.0059,
"step": 125
},
{
"epoch": 0.64,
"learning_rate": 5.731007993667155e-06,
"loss": 1.001,
"step": 130
},
{
"epoch": 0.67,
"learning_rate": 5.044741191005908e-06,
"loss": 0.9999,
"step": 135
},
{
"epoch": 0.67,
"eval_loss": 1.0052766799926758,
"eval_runtime": 1465.7345,
"eval_samples_per_second": 15.767,
"eval_steps_per_second": 0.986,
"step": 136
},
{
"epoch": 0.67,
"step": 136,
"total_flos": 1.227587501862106e+19,
"train_loss": 1.0304046939401066,
"train_runtime": 43361.1536,
"train_samples_per_second": 4.794,
"train_steps_per_second": 0.005
}
],
"logging_steps": 5,
"max_steps": 203,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 1.227587501862106e+19,
"trial_name": null,
"trial_params": null
}