Training in progress, step 30, checkpoint (trainer_state.json)
{
"best_metric": 1.0031893253326416,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 3.0943396226415096,
"eval_steps": 25,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.10062893081761007,
"grad_norm": 0.3649255335330963,
"learning_rate": 5e-05,
"loss": 1.3684,
"step": 1
},
{
"epoch": 0.10062893081761007,
"eval_loss": 1.4094620943069458,
"eval_runtime": 6.6502,
"eval_samples_per_second": 7.519,
"eval_steps_per_second": 1.955,
"step": 1
},
{
"epoch": 0.20125786163522014,
"grad_norm": 0.30139613151550293,
"learning_rate": 0.0001,
"loss": 1.2944,
"step": 2
},
{
"epoch": 0.3018867924528302,
"grad_norm": 0.33318033814430237,
"learning_rate": 9.971704944519594e-05,
"loss": 1.4054,
"step": 3
},
{
"epoch": 0.4025157232704403,
"grad_norm": 0.29401275515556335,
"learning_rate": 9.887175604818206e-05,
"loss": 1.2595,
"step": 4
},
{
"epoch": 0.5031446540880503,
"grad_norm": 0.31839922070503235,
"learning_rate": 9.747474986387654e-05,
"loss": 1.3276,
"step": 5
},
{
"epoch": 0.6037735849056604,
"grad_norm": 0.26795294880867004,
"learning_rate": 9.554359905560886e-05,
"loss": 1.1699,
"step": 6
},
{
"epoch": 0.7044025157232704,
"grad_norm": 0.24816186726093292,
"learning_rate": 9.310258896527278e-05,
"loss": 1.2222,
"step": 7
},
{
"epoch": 0.8050314465408805,
"grad_norm": 0.30153927206993103,
"learning_rate": 9.018241671106134e-05,
"loss": 1.1705,
"step": 8
},
{
"epoch": 0.9056603773584906,
"grad_norm": 0.28425419330596924,
"learning_rate": 8.681980515339464e-05,
"loss": 1.1431,
"step": 9
},
{
"epoch": 1.0314465408805031,
"grad_norm": 0.3570249378681183,
"learning_rate": 8.305704108364301e-05,
"loss": 1.3933,
"step": 10
},
{
"epoch": 1.1320754716981132,
"grad_norm": 0.23974794149398804,
"learning_rate": 7.894144344319014e-05,
"loss": 1.0418,
"step": 11
},
{
"epoch": 1.2327044025157232,
"grad_norm": 0.20039339363574982,
"learning_rate": 7.452476826029011e-05,
"loss": 0.9259,
"step": 12
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.22616326808929443,
"learning_rate": 6.986255778798253e-05,
"loss": 1.1102,
"step": 13
},
{
"epoch": 1.4339622641509435,
"grad_norm": 0.18564777076244354,
"learning_rate": 6.501344202803414e-05,
"loss": 1.0617,
"step": 14
},
{
"epoch": 1.5345911949685536,
"grad_norm": 0.21298491954803467,
"learning_rate": 6.003840142464886e-05,
"loss": 1.0035,
"step": 15
},
{
"epoch": 1.6352201257861636,
"grad_norm": 0.19629953801631927,
"learning_rate": 5.500000000000001e-05,
"loss": 1.0182,
"step": 16
},
{
"epoch": 1.7358490566037736,
"grad_norm": 0.20243975520133972,
"learning_rate": 4.9961598575351155e-05,
"loss": 1.0048,
"step": 17
},
{
"epoch": 1.8364779874213837,
"grad_norm": 0.19140854477882385,
"learning_rate": 4.498655797196586e-05,
"loss": 0.998,
"step": 18
},
{
"epoch": 1.9371069182389937,
"grad_norm": 0.22857725620269775,
"learning_rate": 4.01374422120175e-05,
"loss": 1.1565,
"step": 19
},
{
"epoch": 2.0628930817610063,
"grad_norm": 0.20002713799476624,
"learning_rate": 3.547523173970989e-05,
"loss": 1.0742,
"step": 20
},
{
"epoch": 2.1635220125786163,
"grad_norm": 0.1631658673286438,
"learning_rate": 3.105855655680986e-05,
"loss": 0.9238,
"step": 21
},
{
"epoch": 2.2641509433962264,
"grad_norm": 0.18039830029010773,
"learning_rate": 2.6942958916356998e-05,
"loss": 0.9401,
"step": 22
},
{
"epoch": 2.3647798742138364,
"grad_norm": 0.18136851489543915,
"learning_rate": 2.3180194846605367e-05,
"loss": 0.9679,
"step": 23
},
{
"epoch": 2.4654088050314464,
"grad_norm": 0.1782331019639969,
"learning_rate": 1.981758328893866e-05,
"loss": 0.9677,
"step": 24
},
{
"epoch": 2.5660377358490565,
"grad_norm": 0.1870107799768448,
"learning_rate": 1.6897411034727218e-05,
"loss": 0.9624,
"step": 25
},
{
"epoch": 2.5660377358490565,
"eval_loss": 1.0031893253326416,
"eval_runtime": 6.6639,
"eval_samples_per_second": 7.503,
"eval_steps_per_second": 1.951,
"step": 25
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.18223722279071808,
"learning_rate": 1.4456400944391146e-05,
"loss": 0.9384,
"step": 26
},
{
"epoch": 2.767295597484277,
"grad_norm": 0.1712333709001541,
"learning_rate": 1.252525013612346e-05,
"loss": 0.8976,
"step": 27
},
{
"epoch": 2.867924528301887,
"grad_norm": 0.1786852926015854,
"learning_rate": 1.1128243951817937e-05,
"loss": 0.91,
"step": 28
},
{
"epoch": 2.968553459119497,
"grad_norm": 0.21840958297252655,
"learning_rate": 1.0282950554804085e-05,
"loss": 1.1865,
"step": 29
},
{
"epoch": 3.0943396226415096,
"grad_norm": 0.17944976687431335,
"learning_rate": 1e-05,
"loss": 0.9459,
"step": 30
}
],
"logging_steps": 1,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.679578899551355e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
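
The JSON above is the trainer_state.json that the Hugging Face Transformers Trainer writes into each checkpoint directory. A minimal sketch of how it can be inspected programmatically is shown below; the file path is an assumption (the file normally sits inside the checkpoint folder, e.g. checkpoint-30/trainer_state.json), and the script only uses the fields present in the state above.

import json

# Assumed path inside the saved checkpoint directory.
with open("checkpoint-30/trainer_state.json") as f:
    state = json.load(f)

# Best evaluation result tracked by the Trainer / EarlyStoppingCallback.
print("best eval_loss:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# log_history mixes training and evaluation entries; split them by key.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

for entry in train_log:
    print(f"step {entry['step']:>3}: loss={entry['loss']:.4f} "
          f"lr={entry['learning_rate']:.2e}")

for entry in eval_log:
    print(f"eval @ step {entry['step']}: eval_loss={entry['eval_loss']:.4f}")

With this state, the script would report the best eval_loss of 1.0031893253326416 at checkpoint-25 and list the 30 logged training steps, matching max_steps.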