vit-base-beans / trainer_state.json
{
"best_metric": 1.0332472324371338,
"best_model_checkpoint": "./beans_outputs/checkpoint-650",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 650,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07692307692307693,
"grad_norm": 2.0955963134765625,
"learning_rate": 1.9692307692307696e-05,
"loss": 1.1239,
"step": 10
},
{
"epoch": 0.15384615384615385,
"grad_norm": 1.8236017227172852,
"learning_rate": 1.9384615384615386e-05,
"loss": 1.1222,
"step": 20
},
{
"epoch": 0.23076923076923078,
"grad_norm": 1.9866633415222168,
"learning_rate": 1.907692307692308e-05,
"loss": 1.1165,
"step": 30
},
{
"epoch": 0.3076923076923077,
"grad_norm": 2.4556338787078857,
"learning_rate": 1.876923076923077e-05,
"loss": 1.1047,
"step": 40
},
{
"epoch": 0.38461538461538464,
"grad_norm": 1.6562598943710327,
"learning_rate": 1.8461538461538465e-05,
"loss": 1.1085,
"step": 50
},
{
"epoch": 0.46153846153846156,
"grad_norm": 1.501336932182312,
"learning_rate": 1.8153846153846155e-05,
"loss": 1.1048,
"step": 60
},
{
"epoch": 0.5384615384615384,
"grad_norm": 2.606107234954834,
"learning_rate": 1.784615384615385e-05,
"loss": 1.1031,
"step": 70
},
{
"epoch": 0.6153846153846154,
"grad_norm": 2.634584426879883,
"learning_rate": 1.753846153846154e-05,
"loss": 1.0916,
"step": 80
},
{
"epoch": 0.6923076923076923,
"grad_norm": 2.039886236190796,
"learning_rate": 1.7230769230769234e-05,
"loss": 1.095,
"step": 90
},
{
"epoch": 0.7692307692307693,
"grad_norm": 2.4800949096679688,
"learning_rate": 1.6923076923076924e-05,
"loss": 1.0916,
"step": 100
},
{
"epoch": 0.8461538461538461,
"grad_norm": 1.6766040325164795,
"learning_rate": 1.6615384615384618e-05,
"loss": 1.0959,
"step": 110
},
{
"epoch": 0.9230769230769231,
"grad_norm": 2.2848353385925293,
"learning_rate": 1.630769230769231e-05,
"loss": 1.1005,
"step": 120
},
{
"epoch": 1.0,
"grad_norm": 4.07199239730835,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.0881,
"step": 130
},
{
"epoch": 1.0,
"eval_accuracy": 0.41353383458646614,
"eval_loss": 1.0901767015457153,
"eval_runtime": 0.7894,
"eval_samples_per_second": 168.48,
"eval_steps_per_second": 21.535,
"step": 130
},
{
"epoch": 1.0769230769230769,
"grad_norm": 2.0711560249328613,
"learning_rate": 1.5692307692307693e-05,
"loss": 1.0794,
"step": 140
},
{
"epoch": 1.1538461538461537,
"grad_norm": 2.499732494354248,
"learning_rate": 1.5384615384615387e-05,
"loss": 1.0755,
"step": 150
},
{
"epoch": 1.2307692307692308,
"grad_norm": 1.4569323062896729,
"learning_rate": 1.5076923076923078e-05,
"loss": 1.0807,
"step": 160
},
{
"epoch": 1.3076923076923077,
"grad_norm": 2.351478338241577,
"learning_rate": 1.4769230769230772e-05,
"loss": 1.0965,
"step": 170
},
{
"epoch": 1.3846153846153846,
"grad_norm": 2.1514322757720947,
"learning_rate": 1.4461538461538462e-05,
"loss": 1.0839,
"step": 180
},
{
"epoch": 1.4615384615384617,
"grad_norm": 2.151601791381836,
"learning_rate": 1.4153846153846156e-05,
"loss": 1.0837,
"step": 190
},
{
"epoch": 1.5384615384615383,
"grad_norm": 1.797500729560852,
"learning_rate": 1.3846153846153847e-05,
"loss": 1.0807,
"step": 200
},
{
"epoch": 1.6153846153846154,
"grad_norm": 1.7583892345428467,
"learning_rate": 1.353846153846154e-05,
"loss": 1.073,
"step": 210
},
{
"epoch": 1.6923076923076923,
"grad_norm": 3.5006496906280518,
"learning_rate": 1.3230769230769231e-05,
"loss": 1.0741,
"step": 220
},
{
"epoch": 1.7692307692307692,
"grad_norm": 2.0179672241210938,
"learning_rate": 1.2923076923076925e-05,
"loss": 1.077,
"step": 230
},
{
"epoch": 1.8461538461538463,
"grad_norm": 2.057086944580078,
"learning_rate": 1.2615384615384616e-05,
"loss": 1.0925,
"step": 240
},
{
"epoch": 1.9230769230769231,
"grad_norm": 1.9710103273391724,
"learning_rate": 1.230769230769231e-05,
"loss": 1.0757,
"step": 250
},
{
"epoch": 2.0,
"grad_norm": 2.9315028190612793,
"learning_rate": 1.2e-05,
"loss": 1.0716,
"step": 260
},
{
"epoch": 2.0,
"eval_accuracy": 0.5037593984962406,
"eval_loss": 1.0685255527496338,
"eval_runtime": 0.7711,
"eval_samples_per_second": 172.471,
"eval_steps_per_second": 22.045,
"step": 260
},
{
"epoch": 2.076923076923077,
"grad_norm": 2.183527946472168,
"learning_rate": 1.1692307692307694e-05,
"loss": 1.0786,
"step": 270
},
{
"epoch": 2.1538461538461537,
"grad_norm": 2.379652500152588,
"learning_rate": 1.1384615384615385e-05,
"loss": 1.0676,
"step": 280
},
{
"epoch": 2.230769230769231,
"grad_norm": 2.387296438217163,
"learning_rate": 1.1076923076923079e-05,
"loss": 1.064,
"step": 290
},
{
"epoch": 2.3076923076923075,
"grad_norm": 2.8164947032928467,
"learning_rate": 1.076923076923077e-05,
"loss": 1.0548,
"step": 300
},
{
"epoch": 2.3846153846153846,
"grad_norm": 1.849363088607788,
"learning_rate": 1.0461538461538463e-05,
"loss": 1.0693,
"step": 310
},
{
"epoch": 2.4615384615384617,
"grad_norm": 2.0274300575256348,
"learning_rate": 1.0153846153846154e-05,
"loss": 1.0604,
"step": 320
},
{
"epoch": 2.5384615384615383,
"grad_norm": 1.4544223546981812,
"learning_rate": 9.846153846153848e-06,
"loss": 1.0458,
"step": 330
},
{
"epoch": 2.6153846153846154,
"grad_norm": 2.1712183952331543,
"learning_rate": 9.53846153846154e-06,
"loss": 1.0609,
"step": 340
},
{
"epoch": 2.6923076923076925,
"grad_norm": 1.922677993774414,
"learning_rate": 9.230769230769232e-06,
"loss": 1.0646,
"step": 350
},
{
"epoch": 2.769230769230769,
"grad_norm": 1.8288860321044922,
"learning_rate": 8.923076923076925e-06,
"loss": 1.0597,
"step": 360
},
{
"epoch": 2.8461538461538463,
"grad_norm": 2.123480796813965,
"learning_rate": 8.615384615384617e-06,
"loss": 1.052,
"step": 370
},
{
"epoch": 2.9230769230769234,
"grad_norm": 1.820168137550354,
"learning_rate": 8.307692307692309e-06,
"loss": 1.0434,
"step": 380
},
{
"epoch": 3.0,
"grad_norm": 4.973505020141602,
"learning_rate": 8.000000000000001e-06,
"loss": 1.061,
"step": 390
},
{
"epoch": 3.0,
"eval_accuracy": 0.6240601503759399,
"eval_loss": 1.0459144115447998,
"eval_runtime": 0.7618,
"eval_samples_per_second": 174.588,
"eval_steps_per_second": 22.316,
"step": 390
},
{
"epoch": 3.076923076923077,
"grad_norm": 2.1673223972320557,
"learning_rate": 7.692307692307694e-06,
"loss": 1.0616,
"step": 400
},
{
"epoch": 3.1538461538461537,
"grad_norm": 1.8567888736724854,
"learning_rate": 7.384615384615386e-06,
"loss": 1.0501,
"step": 410
},
{
"epoch": 3.230769230769231,
"grad_norm": 2.0640571117401123,
"learning_rate": 7.076923076923078e-06,
"loss": 1.0611,
"step": 420
},
{
"epoch": 3.3076923076923075,
"grad_norm": 2.215384006500244,
"learning_rate": 6.76923076923077e-06,
"loss": 1.0469,
"step": 430
},
{
"epoch": 3.3846153846153846,
"grad_norm": 2.1000049114227295,
"learning_rate": 6.461538461538463e-06,
"loss": 1.0499,
"step": 440
},
{
"epoch": 3.4615384615384617,
"grad_norm": 1.7218382358551025,
"learning_rate": 6.153846153846155e-06,
"loss": 1.0564,
"step": 450
},
{
"epoch": 3.5384615384615383,
"grad_norm": 2.3569300174713135,
"learning_rate": 5.846153846153847e-06,
"loss": 1.0599,
"step": 460
},
{
"epoch": 3.6153846153846154,
"grad_norm": 1.5210909843444824,
"learning_rate": 5.538461538461539e-06,
"loss": 1.0367,
"step": 470
},
{
"epoch": 3.6923076923076925,
"grad_norm": 2.7621657848358154,
"learning_rate": 5.230769230769232e-06,
"loss": 1.0421,
"step": 480
},
{
"epoch": 3.769230769230769,
"grad_norm": 1.5097808837890625,
"learning_rate": 4.923076923076924e-06,
"loss": 1.0362,
"step": 490
},
{
"epoch": 3.8461538461538463,
"grad_norm": 1.5118447542190552,
"learning_rate": 4.615384615384616e-06,
"loss": 1.0572,
"step": 500
},
{
"epoch": 3.9230769230769234,
"grad_norm": 1.7513490915298462,
"learning_rate": 4.307692307692308e-06,
"loss": 1.0361,
"step": 510
},
{
"epoch": 4.0,
"grad_norm": 5.398025035858154,
"learning_rate": 4.000000000000001e-06,
"loss": 1.0514,
"step": 520
},
{
"epoch": 4.0,
"eval_accuracy": 0.6015037593984962,
"eval_loss": 1.0407124757766724,
"eval_runtime": 0.7726,
"eval_samples_per_second": 172.155,
"eval_steps_per_second": 22.005,
"step": 520
},
{
"epoch": 4.076923076923077,
"grad_norm": 2.5516345500946045,
"learning_rate": 3.692307692307693e-06,
"loss": 1.0529,
"step": 530
},
{
"epoch": 4.153846153846154,
"grad_norm": 1.6976008415222168,
"learning_rate": 3.384615384615385e-06,
"loss": 1.0472,
"step": 540
},
{
"epoch": 4.230769230769231,
"grad_norm": 2.5672519207000732,
"learning_rate": 3.0769230769230774e-06,
"loss": 1.0565,
"step": 550
},
{
"epoch": 4.3076923076923075,
"grad_norm": 2.166529655456543,
"learning_rate": 2.7692307692307697e-06,
"loss": 1.0619,
"step": 560
},
{
"epoch": 4.384615384615385,
"grad_norm": 1.961472511291504,
"learning_rate": 2.461538461538462e-06,
"loss": 1.0322,
"step": 570
},
{
"epoch": 4.461538461538462,
"grad_norm": 2.392319440841675,
"learning_rate": 2.153846153846154e-06,
"loss": 1.0388,
"step": 580
},
{
"epoch": 4.538461538461538,
"grad_norm": 2.3034205436706543,
"learning_rate": 1.8461538461538465e-06,
"loss": 1.0358,
"step": 590
},
{
"epoch": 4.615384615384615,
"grad_norm": 2.037050247192383,
"learning_rate": 1.5384615384615387e-06,
"loss": 1.0334,
"step": 600
},
{
"epoch": 4.6923076923076925,
"grad_norm": 3.0737335681915283,
"learning_rate": 1.230769230769231e-06,
"loss": 1.0506,
"step": 610
},
{
"epoch": 4.769230769230769,
"grad_norm": 2.1824796199798584,
"learning_rate": 9.230769230769232e-07,
"loss": 1.0516,
"step": 620
},
{
"epoch": 4.846153846153846,
"grad_norm": 1.9239214658737183,
"learning_rate": 6.153846153846155e-07,
"loss": 1.0399,
"step": 630
},
{
"epoch": 4.923076923076923,
"grad_norm": 2.267302989959717,
"learning_rate": 3.0769230769230774e-07,
"loss": 1.0374,
"step": 640
},
{
"epoch": 5.0,
"grad_norm": 4.4957404136657715,
"learning_rate": 0.0,
"loss": 1.05,
"step": 650
},
{
"epoch": 5.0,
"eval_accuracy": 0.6766917293233082,
"eval_loss": 1.0332472324371338,
"eval_runtime": 0.8208,
"eval_samples_per_second": 162.035,
"eval_steps_per_second": 20.711,
"step": 650
},
{
"epoch": 5.0,
"step": 650,
"total_flos": 5.219455168198656e+16,
"train_loss": 1.0677934587918796,
"train_runtime": 48.1154,
"train_samples_per_second": 107.45,
"train_steps_per_second": 13.509
}
],
"logging_steps": 10,
"max_steps": 650,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.219455168198656e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
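
The file above appears to be a standard 🤗 Transformers `trainer_state.json`, where `log_history` interleaves per-step training logs (entries with `loss`) and per-epoch evaluation logs (entries with `eval_accuracy`). As a minimal sketch of how it can be read back, the snippet below parses it with the standard-library `json` module and prints the per-epoch evaluation metrics; the file path is an assumption based on the `best_model_checkpoint` value recorded above and should be adjusted to wherever the checkpoint actually lives.

```python
import json

# Path assumed from "best_model_checkpoint" in the state file; adjust as needed.
STATE_PATH = "./beans_outputs/checkpoint-650/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history mixes per-step training logs and per-epoch evaluation logs;
# evaluation entries are the ones carrying an "eval_accuracy" key.
eval_logs = [entry for entry in state["log_history"] if "eval_accuracy" in entry]

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(best_metric={state['best_metric']:.4f})")

for entry in eval_logs:
    print(f"epoch {entry['epoch']:.0f}: "
          f"eval_accuracy={entry['eval_accuracy']:.3f}, "
          f"eval_loss={entry['eval_loss']:.4f}")
```

With the values logged here, this would show eval accuracy rising from roughly 0.414 at epoch 1 to roughly 0.677 at epoch 5, with `best_metric` corresponding to the final epoch's `eval_loss`.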