gemma7b-summarize-gpt4o-8k / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07142857142857142,
"grad_norm": 179.0,
"learning_rate": 1.4285714285714285e-05,
"loss": 46.5124,
"step": 1
},
{
"epoch": 0.35714285714285715,
"grad_norm": 134.0,
"learning_rate": 7.142857142857143e-05,
"loss": 45.4999,
"step": 5
},
{
"epoch": 0.7142857142857143,
"grad_norm": 19.5,
"learning_rate": 0.00014285714285714287,
"loss": 30.8653,
"step": 10
},
{
"epoch": 1.0,
"eval_loss": 10.663816452026367,
"eval_runtime": 0.2491,
"eval_samples_per_second": 40.143,
"eval_steps_per_second": 4.014,
"step": 14
},
{
"epoch": 1.0714285714285714,
"grad_norm": 12.125,
"learning_rate": 0.00019996891820008164,
"loss": 22.9738,
"step": 15
},
{
"epoch": 1.4285714285714286,
"grad_norm": 4.53125,
"learning_rate": 0.00019888308262251285,
"loss": 19.8669,
"step": 20
},
{
"epoch": 1.7857142857142856,
"grad_norm": 5.5625,
"learning_rate": 0.0001962624246950012,
"loss": 18.5328,
"step": 25
},
{
"epoch": 2.0,
"eval_loss": 7.303110599517822,
"eval_runtime": 0.2339,
"eval_samples_per_second": 42.753,
"eval_steps_per_second": 4.275,
"step": 28
},
{
"epoch": 2.142857142857143,
"grad_norm": 10.25,
"learning_rate": 0.00019214762118704076,
"loss": 17.3052,
"step": 30
},
{
"epoch": 2.5,
"grad_norm": 17.5,
"learning_rate": 0.00018660254037844388,
"loss": 15.1835,
"step": 35
},
{
"epoch": 2.857142857142857,
"grad_norm": 24.875,
"learning_rate": 0.00017971325072229226,
"loss": 11.486,
"step": 40
},
{
"epoch": 3.0,
"eval_loss": 6.628046989440918,
"eval_runtime": 0.2415,
"eval_samples_per_second": 41.402,
"eval_steps_per_second": 4.14,
"step": 42
},
{
"epoch": 3.2142857142857144,
"grad_norm": 26.5,
"learning_rate": 0.00017158668492597186,
"loss": 6.607,
"step": 45
},
{
"epoch": 3.571428571428571,
"grad_norm": 7.125,
"learning_rate": 0.00016234898018587337,
"loss": 3.2178,
"step": 50
},
{
"epoch": 3.928571428571429,
"grad_norm": 5.4375,
"learning_rate": 0.0001521435203379498,
"loss": 2.4959,
"step": 55
},
{
"epoch": 4.0,
"eval_loss": 3.508697032928467,
"eval_runtime": 0.2457,
"eval_samples_per_second": 40.698,
"eval_steps_per_second": 4.07,
"step": 56
},
{
"epoch": 4.285714285714286,
"grad_norm": 2.421875,
"learning_rate": 0.00014112871031306119,
"loss": 2.101,
"step": 60
},
{
"epoch": 4.642857142857143,
"grad_norm": 1.03125,
"learning_rate": 0.00012947551744109043,
"loss": 1.8643,
"step": 65
},
{
"epoch": 5.0,
"grad_norm": 0.65234375,
"learning_rate": 0.00011736481776669306,
"loss": 1.742,
"step": 70
},
{
"epoch": 5.0,
"eval_loss": 3.0216174125671387,
"eval_runtime": 0.2345,
"eval_samples_per_second": 42.653,
"eval_steps_per_second": 4.265,
"step": 70
},
{
"epoch": 5.357142857142857,
"grad_norm": 0.75390625,
"learning_rate": 0.00010498458856606972,
"loss": 1.6462,
"step": 75
},
{
"epoch": 5.714285714285714,
"grad_norm": 0.73046875,
"learning_rate": 9.252699064135758e-05,
"loss": 1.5971,
"step": 80
},
{
"epoch": 6.0,
"eval_loss": 2.880188465118408,
"eval_runtime": 0.2347,
"eval_samples_per_second": 42.612,
"eval_steps_per_second": 4.261,
"step": 84
},
{
"epoch": 6.071428571428571,
"grad_norm": 0.474609375,
"learning_rate": 8.018538568006027e-05,
"loss": 1.5391,
"step": 85
},
{
"epoch": 6.428571428571429,
"grad_norm": 0.62109375,
"learning_rate": 6.815133497483157e-05,
"loss": 1.5124,
"step": 90
},
{
"epoch": 6.785714285714286,
"grad_norm": 0.408203125,
"learning_rate": 5.6611626088244194e-05,
"loss": 1.4792,
"step": 95
},
{
"epoch": 7.0,
"eval_loss": 2.8307275772094727,
"eval_runtime": 0.235,
"eval_samples_per_second": 42.562,
"eval_steps_per_second": 4.256,
"step": 98
},
{
"epoch": 7.142857142857143,
"grad_norm": 0.443359375,
"learning_rate": 4.574537361342407e-05,
"loss": 1.4514,
"step": 100
},
{
"epoch": 7.5,
"grad_norm": 0.60546875,
"learning_rate": 3.5721239031346066e-05,
"loss": 1.4383,
"step": 105
},
{
"epoch": 7.857142857142857,
"grad_norm": 0.8828125,
"learning_rate": 2.669481281701739e-05,
"loss": 1.4333,
"step": 110
},
{
"epoch": 8.0,
"eval_loss": 2.808138608932495,
"eval_runtime": 0.2344,
"eval_samples_per_second": 42.659,
"eval_steps_per_second": 4.266,
"step": 112
},
{
"epoch": 8.214285714285714,
"grad_norm": 0.482421875,
"learning_rate": 1.880619942841435e-05,
"loss": 1.4232,
"step": 115
},
{
"epoch": 8.571428571428571,
"grad_norm": 0.78125,
"learning_rate": 1.2177842662977135e-05,
"loss": 1.4159,
"step": 120
},
{
"epoch": 8.928571428571429,
"grad_norm": 0.53515625,
"learning_rate": 6.9126251355795864e-06,
"loss": 1.4129,
"step": 125
},
{
"epoch": 9.0,
"eval_loss": 2.8151421546936035,
"eval_runtime": 0.2356,
"eval_samples_per_second": 42.443,
"eval_steps_per_second": 4.244,
"step": 126
},
{
"epoch": 9.285714285714286,
"grad_norm": 0.427734375,
"learning_rate": 3.092271377092215e-06,
"loss": 1.4193,
"step": 130
},
{
"epoch": 9.642857142857142,
"grad_norm": 0.6875,
"learning_rate": 7.760793399827937e-07,
"loss": 1.4139,
"step": 135
},
{
"epoch": 10.0,
"grad_norm": 0.376953125,
"learning_rate": 0.0,
"loss": 1.4048,
"step": 140
},
{
"epoch": 10.0,
"eval_loss": 2.812880277633667,
"eval_runtime": 0.2405,
"eval_samples_per_second": 41.578,
"eval_steps_per_second": 4.158,
"step": 140
},
{
"epoch": 10.0,
"step": 140,
"total_flos": 4.268849030789857e+17,
"train_loss": 7.876109651156834,
"train_runtime": 340.0833,
"train_samples_per_second": 25.758,
"train_steps_per_second": 0.412
}
],
"logging_steps": 5,
"max_steps": 140,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 100,
"total_flos": 4.268849030789857e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
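
A minimal sketch (not part of the checkpoint itself) of how the state above can be inspected: load trainer_state.json with the Python standard library and print the per-step training loss alongside the end-of-epoch eval loss. The local filename is an assumption.

import json

# Load the trainer state written by the Hugging Face Trainer (local path assumed).
with open("trainer_state.json") as f:
    state = json.load(f)

# Walk the log history: entries carrying "loss" are training logs
# (every 5 steps here, per "logging_steps"), entries carrying
# "eval_loss" are the per-epoch evaluation records.
for record in state["log_history"]:
    if "loss" in record:
        print(f"epoch {record['epoch']:5.2f}  step {record['step']:3d}  train_loss {record['loss']:.4f}")
    elif "eval_loss" in record:
        print(f"epoch {record['epoch']:5.2f}  step {record['step']:3d}  eval_loss  {record['eval_loss']:.4f}")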