{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.7597626604112047,
"eval_steps": 500,
"global_step": 40000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 4.942504944574767e-05,
"loss": 3.2985,
"step": 500
},
{
"epoch": 0.07,
"learning_rate": 4.8850098891495336e-05,
"loss": 3.1395,
"step": 1000
},
{
"epoch": 0.1,
"learning_rate": 4.8275148337242996e-05,
"loss": 3.0915,
"step": 1500
},
{
"epoch": 0.14,
"learning_rate": 4.770019778299066e-05,
"loss": 3.0514,
"step": 2000
},
{
"epoch": 0.17,
"learning_rate": 4.712524722873833e-05,
"loss": 2.9975,
"step": 2500
},
{
"epoch": 0.21,
"learning_rate": 4.6550296674485996e-05,
"loss": 2.9986,
"step": 3000
},
{
"epoch": 0.24,
"learning_rate": 4.597534612023366e-05,
"loss": 2.9464,
"step": 3500
},
{
"epoch": 0.28,
"learning_rate": 4.540039556598133e-05,
"loss": 2.9419,
"step": 4000
},
{
"epoch": 0.31,
"learning_rate": 4.4825445011728997e-05,
"loss": 2.9467,
"step": 4500
},
{
"epoch": 0.34,
"learning_rate": 4.425049445747666e-05,
"loss": 2.9477,
"step": 5000
},
{
"epoch": 0.38,
"learning_rate": 4.367554390322432e-05,
"loss": 2.9098,
"step": 5500
},
{
"epoch": 0.41,
"learning_rate": 4.310059334897199e-05,
"loss": 2.9206,
"step": 6000
},
{
"epoch": 0.45,
"learning_rate": 4.252564279471966e-05,
"loss": 2.908,
"step": 6500
},
{
"epoch": 0.48,
"learning_rate": 4.1950692240467324e-05,
"loss": 2.8956,
"step": 7000
},
{
"epoch": 0.52,
"learning_rate": 4.1375741686214984e-05,
"loss": 2.9077,
"step": 7500
},
{
"epoch": 0.55,
"learning_rate": 4.080079113196265e-05,
"loss": 2.8966,
"step": 8000
},
{
"epoch": 0.59,
"learning_rate": 4.022584057771032e-05,
"loss": 2.8607,
"step": 8500
},
{
"epoch": 0.62,
"learning_rate": 3.9650890023457984e-05,
"loss": 2.8856,
"step": 9000
},
{
"epoch": 0.66,
"learning_rate": 3.907593946920565e-05,
"loss": 2.8774,
"step": 9500
},
{
"epoch": 0.69,
"learning_rate": 3.850098891495332e-05,
"loss": 2.8594,
"step": 10000
},
{
"epoch": 0.72,
"learning_rate": 3.7926038360700984e-05,
"loss": 2.8541,
"step": 10500
},
{
"epoch": 0.76,
"learning_rate": 3.735108780644865e-05,
"loss": 2.8643,
"step": 11000
},
{
"epoch": 0.79,
"learning_rate": 3.677613725219631e-05,
"loss": 2.8425,
"step": 11500
},
{
"epoch": 0.83,
"learning_rate": 3.620118669794398e-05,
"loss": 2.8535,
"step": 12000
},
{
"epoch": 0.86,
"learning_rate": 3.5626236143691645e-05,
"loss": 2.8506,
"step": 12500
},
{
"epoch": 0.9,
"learning_rate": 3.505128558943931e-05,
"loss": 2.8514,
"step": 13000
},
{
"epoch": 0.93,
"learning_rate": 3.447633503518697e-05,
"loss": 2.8401,
"step": 13500
},
{
"epoch": 0.97,
"learning_rate": 3.390138448093464e-05,
"loss": 2.8293,
"step": 14000
},
{
"epoch": 1.0,
"learning_rate": 3.3326433926682305e-05,
"loss": 2.8031,
"step": 14500
},
{
"epoch": 1.03,
"learning_rate": 3.275148337242998e-05,
"loss": 2.704,
"step": 15000
},
{
"epoch": 1.07,
"learning_rate": 3.217653281817764e-05,
"loss": 2.7216,
"step": 15500
},
{
"epoch": 1.1,
"learning_rate": 3.1601582263925305e-05,
"loss": 2.6997,
"step": 16000
},
{
"epoch": 1.14,
"learning_rate": 3.102663170967297e-05,
"loss": 2.6923,
"step": 16500
},
{
"epoch": 1.17,
"learning_rate": 3.0451681155420635e-05,
"loss": 2.7136,
"step": 17000
},
{
"epoch": 1.21,
"learning_rate": 2.9876730601168302e-05,
"loss": 2.6984,
"step": 17500
},
{
"epoch": 1.24,
"learning_rate": 2.9301780046915966e-05,
"loss": 2.7133,
"step": 18000
},
{
"epoch": 1.28,
"learning_rate": 2.8726829492663632e-05,
"loss": 2.6969,
"step": 18500
},
{
"epoch": 1.31,
"learning_rate": 2.8151878938411296e-05,
"loss": 2.6954,
"step": 19000
},
{
"epoch": 1.35,
"learning_rate": 2.7576928384158962e-05,
"loss": 2.7024,
"step": 19500
},
{
"epoch": 1.38,
"learning_rate": 2.7001977829906626e-05,
"loss": 2.7208,
"step": 20000
},
{
"epoch": 1.41,
"learning_rate": 2.6427027275654293e-05,
"loss": 2.683,
"step": 20500
},
{
"epoch": 1.45,
"learning_rate": 2.5852076721401963e-05,
"loss": 2.6896,
"step": 21000
},
{
"epoch": 1.48,
"learning_rate": 2.527712616714963e-05,
"loss": 2.7004,
"step": 21500
},
{
"epoch": 1.52,
"learning_rate": 2.470217561289729e-05,
"loss": 2.7053,
"step": 22000
},
{
"epoch": 1.55,
"learning_rate": 2.412722505864496e-05,
"loss": 2.6863,
"step": 22500
},
{
"epoch": 1.59,
"learning_rate": 2.3552274504392623e-05,
"loss": 2.6896,
"step": 23000
},
{
"epoch": 1.62,
"learning_rate": 2.297732395014029e-05,
"loss": 2.6897,
"step": 23500
},
{
"epoch": 1.66,
"learning_rate": 2.2402373395887953e-05,
"loss": 2.6733,
"step": 24000
},
{
"epoch": 1.69,
"learning_rate": 2.182742284163562e-05,
"loss": 2.684,
"step": 24500
},
{
"epoch": 1.72,
"learning_rate": 2.1252472287383283e-05,
"loss": 2.6966,
"step": 25000
},
{
"epoch": 1.76,
"learning_rate": 2.0677521733130954e-05,
"loss": 2.6691,
"step": 25500
},
{
"epoch": 1.79,
"learning_rate": 2.0102571178878617e-05,
"loss": 2.6901,
"step": 26000
},
{
"epoch": 1.83,
"learning_rate": 1.9527620624626284e-05,
"loss": 2.6852,
"step": 26500
},
{
"epoch": 1.86,
"learning_rate": 1.8952670070373947e-05,
"loss": 2.679,
"step": 27000
},
{
"epoch": 1.9,
"learning_rate": 1.8377719516121614e-05,
"loss": 2.6777,
"step": 27500
},
{
"epoch": 1.93,
"learning_rate": 1.7802768961869277e-05,
"loss": 2.6864,
"step": 28000
},
{
"epoch": 1.97,
"learning_rate": 1.7227818407616947e-05,
"loss": 2.6714,
"step": 28500
},
{
"epoch": 2.0,
"learning_rate": 1.665286785336461e-05,
"loss": 2.681,
"step": 29000
},
{
"epoch": 2.04,
"learning_rate": 1.6077917299112278e-05,
"loss": 2.6028,
"step": 29500
},
{
"epoch": 2.07,
"learning_rate": 1.550296674485994e-05,
"loss": 2.5981,
"step": 30000
},
{
"epoch": 2.1,
"learning_rate": 1.4928016190607608e-05,
"loss": 2.5976,
"step": 30500
},
{
"epoch": 2.14,
"learning_rate": 1.4353065636355273e-05,
"loss": 2.5921,
"step": 31000
},
{
"epoch": 2.17,
"learning_rate": 1.3778115082102941e-05,
"loss": 2.588,
"step": 31500
},
{
"epoch": 2.21,
"learning_rate": 1.3203164527850606e-05,
"loss": 2.5888,
"step": 32000
},
{
"epoch": 2.24,
"learning_rate": 1.2628213973598271e-05,
"loss": 2.6143,
"step": 32500
},
{
"epoch": 2.28,
"learning_rate": 1.2053263419345937e-05,
"loss": 2.5874,
"step": 33000
},
{
"epoch": 2.31,
"learning_rate": 1.1478312865093603e-05,
"loss": 2.5905,
"step": 33500
},
{
"epoch": 2.35,
"learning_rate": 1.0903362310841268e-05,
"loss": 2.6078,
"step": 34000
},
{
"epoch": 2.38,
"learning_rate": 1.0328411756588933e-05,
"loss": 2.5793,
"step": 34500
},
{
"epoch": 2.41,
"learning_rate": 9.7534612023366e-06,
"loss": 2.5968,
"step": 35000
},
{
"epoch": 2.45,
"learning_rate": 9.178510648084265e-06,
"loss": 2.595,
"step": 35500
},
{
"epoch": 2.48,
"learning_rate": 8.60356009383193e-06,
"loss": 2.5673,
"step": 36000
},
{
"epoch": 2.52,
"learning_rate": 8.028609539579597e-06,
"loss": 2.5954,
"step": 36500
},
{
"epoch": 2.55,
"learning_rate": 7.453658985327262e-06,
"loss": 2.5939,
"step": 37000
},
{
"epoch": 2.59,
"learning_rate": 6.878708431074927e-06,
"loss": 2.6008,
"step": 37500
},
{
"epoch": 2.62,
"learning_rate": 6.303757876822594e-06,
"loss": 2.6016,
"step": 38000
},
{
"epoch": 2.66,
"learning_rate": 5.728807322570259e-06,
"loss": 2.5827,
"step": 38500
},
{
"epoch": 2.69,
"learning_rate": 5.153856768317925e-06,
"loss": 2.5904,
"step": 39000
},
{
"epoch": 2.73,
"learning_rate": 4.57890621406559e-06,
"loss": 2.5745,
"step": 39500
},
{
"epoch": 2.76,
"learning_rate": 4.003955659813256e-06,
"loss": 2.5777,
"step": 40000
}
],
"logging_steps": 500,
"max_steps": 43482,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10000,
"total_flos": 1.0451289341952e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}