WizardVerseV1/models/V1/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.921962992759453,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 0,
"loss": 0.5933,
"step": 1
},
{
"epoch": 0.1,
"learning_rate": 0,
"loss": 0.6073,
"step": 2
},
{
"epoch": 0.16,
"learning_rate": 0,
"loss": 0.6047,
"step": 3
},
{
"epoch": 0.21,
"learning_rate": 0,
"loss": 0.5982,
"step": 4
},
{
"epoch": 0.26,
"learning_rate": 0,
"loss": 0.5986,
"step": 5
},
{
"epoch": 0.31,
"learning_rate": 0,
"loss": 0.593,
"step": 6
},
{
"epoch": 0.37,
"learning_rate": 0,
"loss": 0.5914,
"step": 7
},
{
"epoch": 0.42,
"learning_rate": 0,
"loss": 0.6023,
"step": 8
},
{
"epoch": 0.47,
"learning_rate": 0,
"loss": 0.6148,
"step": 9
},
{
"epoch": 0.52,
"learning_rate": 0,
"loss": 0.5973,
"step": 10
},
{
"epoch": 0.58,
"learning_rate": 0,
"loss": 0.5975,
"step": 11
},
{
"epoch": 0.63,
"learning_rate": 0,
"loss": 0.5867,
"step": 12
},
{
"epoch": 0.68,
"learning_rate": 0,
"loss": 0.5975,
"step": 13
},
{
"epoch": 0.73,
"learning_rate": 0,
"loss": 0.6077,
"step": 14
},
{
"epoch": 0.78,
"learning_rate": 0.0,
"loss": 0.6097,
"step": 15
},
{
"epoch": 0.84,
"learning_rate": 0.0,
"loss": 0.6477,
"step": 16
},
{
"epoch": 0.89,
"learning_rate": 6.020599913279623e-06,
"loss": 0.649,
"step": 17
},
{
"epoch": 0.94,
"learning_rate": 9.542425094393249e-06,
"loss": 0.647,
"step": 18
},
{
"epoch": 0.99,
"learning_rate": 1.2041199826559246e-05,
"loss": 0.6232,
"step": 19
},
{
"epoch": 1.05,
"learning_rate": 1.3979400086720374e-05,
"loss": 0.5682,
"step": 20
},
{
"epoch": 1.1,
"learning_rate": 1.5563025007672873e-05,
"loss": 0.5723,
"step": 21
},
{
"epoch": 1.15,
"learning_rate": 1.6901960800285137e-05,
"loss": 0.5791,
"step": 22
},
{
"epoch": 1.2,
"learning_rate": 1.806179973983887e-05,
"loss": 0.575,
"step": 23
},
{
"epoch": 1.26,
"learning_rate": 1.9084850188786497e-05,
"loss": 0.5563,
"step": 24
},
{
"epoch": 1.31,
"learning_rate": 1.9999999999999998e-05,
"loss": 0.569,
"step": 25
},
{
"epoch": 1.36,
"learning_rate": 2e-05,
"loss": 0.5692,
"step": 26
},
{
"epoch": 1.41,
"learning_rate": 1.96969696969697e-05,
"loss": 0.5626,
"step": 27
},
{
"epoch": 1.46,
"learning_rate": 1.9393939393939395e-05,
"loss": 0.545,
"step": 28
},
{
"epoch": 1.52,
"learning_rate": 1.9090909090909094e-05,
"loss": 0.5551,
"step": 29
},
{
"epoch": 1.57,
"learning_rate": 1.8787878787878792e-05,
"loss": 0.5515,
"step": 30
},
{
"epoch": 1.62,
"learning_rate": 1.8484848484848487e-05,
"loss": 0.5383,
"step": 31
},
{
"epoch": 1.67,
"learning_rate": 1.8181818181818182e-05,
"loss": 0.5502,
"step": 32
},
{
"epoch": 1.73,
"learning_rate": 1.787878787878788e-05,
"loss": 0.5573,
"step": 33
},
{
"epoch": 1.78,
"learning_rate": 1.7575757575757576e-05,
"loss": 0.5423,
"step": 34
},
{
"epoch": 1.83,
"learning_rate": 1.7272727272727274e-05,
"loss": 0.5436,
"step": 35
},
{
"epoch": 1.88,
"learning_rate": 1.6969696969696972e-05,
"loss": 0.5366,
"step": 36
},
{
"epoch": 1.93,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.5452,
"step": 37
},
{
"epoch": 1.99,
"learning_rate": 1.6363636363636366e-05,
"loss": 0.5332,
"step": 38
},
{
"epoch": 2.04,
"learning_rate": 1.606060606060606e-05,
"loss": 0.494,
"step": 39
},
{
"epoch": 2.09,
"learning_rate": 1.575757575757576e-05,
"loss": 0.4812,
"step": 40
},
{
"epoch": 2.14,
"learning_rate": 1.5454545454545454e-05,
"loss": 0.4865,
"step": 41
},
{
"epoch": 2.2,
"learning_rate": 1.5151515151515153e-05,
"loss": 0.4863,
"step": 42
},
{
"epoch": 2.25,
"learning_rate": 1.484848484848485e-05,
"loss": 0.4869,
"step": 43
},
{
"epoch": 2.3,
"learning_rate": 1.4545454545454546e-05,
"loss": 0.4744,
"step": 44
},
{
"epoch": 2.35,
"learning_rate": 1.4242424242424245e-05,
"loss": 0.4769,
"step": 45
},
{
"epoch": 2.41,
"learning_rate": 1.3939393939393942e-05,
"loss": 0.4812,
"step": 46
},
{
"epoch": 2.46,
"learning_rate": 1.3636363636363637e-05,
"loss": 0.4683,
"step": 47
},
{
"epoch": 2.51,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.4783,
"step": 48
},
{
"epoch": 2.56,
"learning_rate": 1.3030303030303032e-05,
"loss": 0.471,
"step": 49
},
{
"epoch": 2.61,
"learning_rate": 1.2727272727272728e-05,
"loss": 0.4695,
"step": 50
},
{
"epoch": 2.67,
"learning_rate": 1.2424242424242425e-05,
"loss": 0.4616,
"step": 51
},
{
"epoch": 2.72,
"learning_rate": 1.2121212121212122e-05,
"loss": 0.4683,
"step": 52
},
{
"epoch": 2.77,
"learning_rate": 1.181818181818182e-05,
"loss": 0.471,
"step": 53
},
{
"epoch": 2.82,
"learning_rate": 1.1515151515151517e-05,
"loss": 0.457,
"step": 54
},
{
"epoch": 2.88,
"learning_rate": 1.1212121212121212e-05,
"loss": 0.4613,
"step": 55
},
{
"epoch": 2.93,
"learning_rate": 1.0909090909090909e-05,
"loss": 0.4585,
"step": 56
},
{
"epoch": 2.98,
"learning_rate": 1.0606060606060606e-05,
"loss": 0.4623,
"step": 57
},
{
"epoch": 3.03,
"learning_rate": 1.0303030303030304e-05,
"loss": 0.44,
"step": 58
},
{
"epoch": 3.09,
"learning_rate": 1e-05,
"loss": 0.4382,
"step": 59
},
{
"epoch": 3.14,
"learning_rate": 9.696969696969698e-06,
"loss": 0.4222,
"step": 60
},
{
"epoch": 3.19,
"learning_rate": 9.393939393939396e-06,
"loss": 0.4287,
"step": 61
},
{
"epoch": 3.24,
"learning_rate": 9.090909090909091e-06,
"loss": 0.4196,
"step": 62
},
{
"epoch": 3.29,
"learning_rate": 8.787878787878788e-06,
"loss": 0.417,
"step": 63
},
{
"epoch": 3.35,
"learning_rate": 8.484848484848486e-06,
"loss": 0.4203,
"step": 64
},
{
"epoch": 3.4,
"learning_rate": 8.181818181818183e-06,
"loss": 0.4195,
"step": 65
},
{
"epoch": 3.45,
"learning_rate": 7.87878787878788e-06,
"loss": 0.4221,
"step": 66
},
{
"epoch": 3.5,
"learning_rate": 7.5757575757575764e-06,
"loss": 0.4148,
"step": 67
},
{
"epoch": 3.56,
"learning_rate": 7.272727272727273e-06,
"loss": 0.4171,
"step": 68
},
{
"epoch": 3.61,
"learning_rate": 6.969696969696971e-06,
"loss": 0.4204,
"step": 69
},
{
"epoch": 3.66,
"learning_rate": 6.666666666666667e-06,
"loss": 0.4176,
"step": 70
},
{
"epoch": 3.71,
"learning_rate": 6.363636363636364e-06,
"loss": 0.4136,
"step": 71
},
{
"epoch": 3.77,
"learning_rate": 6.060606060606061e-06,
"loss": 0.4201,
"step": 72
},
{
"epoch": 3.82,
"learning_rate": 5.7575757575757586e-06,
"loss": 0.4065,
"step": 73
},
{
"epoch": 3.87,
"learning_rate": 5.4545454545454545e-06,
"loss": 0.4163,
"step": 74
},
{
"epoch": 3.92,
"learning_rate": 5.151515151515152e-06,
"loss": 0.415,
"step": 75
}
],
"max_steps": 76,
"num_train_epochs": 4,
"total_flos": 8.226533079758406e+18,
"trial_name": null,
"trial_params": null
}
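
The JSON above is a standard Hugging Face `Trainer` state export: `log_history` holds one record per logged optimization step with `epoch`, `learning_rate`, `loss`, and `step`, alongside run-level fields such as `global_step`, `max_steps`, and `num_train_epochs`. Below is a minimal sketch of how such a file can be read and summarized; it assumes the file has already been fetched locally (for example with `huggingface_hub.snapshot_download`), and the particular summary printed is illustrative rather than anything defined by the original repository.

```python
import json

# Path is an assumption: point this at wherever trainer_state.json
# was downloaded locally.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry records epoch, learning_rate, loss, and step.
history = state["log_history"]
steps = [entry["step"] for entry in history]
losses = [entry["loss"] for entry in history]
lrs = [entry["learning_rate"] for entry in history]

# Print a brief run summary from the run-level fields and the logs.
print(f"logged steps: {len(steps)} "
      f"(global_step={state['global_step']}, max_steps={state['max_steps']})")
print(f"loss: first {losses[0]:.4f} -> last {losses[-1]:.4f}")
print(f"peak learning rate: {max(lrs):.2e}")
```

For this particular file, the summary would show the loss falling from roughly 0.59 at step 1 to about 0.42 by step 75, with a peak learning rate of 2e-05 reached around step 26 before the linear decay.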