{
  "best_metric": 0.5640622093599114,
  "best_model_checkpoint": "models_intermediate/intermediate_it5-large/checkpoint-50000",
  "epoch": 25.0,
  "eval_steps": 10000,
  "global_step": 50000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "learning_rate": 3.9200000000000004e-05,
      "loss": 3.656,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.8400000000000005e-05,
      "loss": 1.506,
      "step": 2000
    },
    {
      "epoch": 1.5,
      "learning_rate": 3.76e-05,
      "loss": 0.676,
      "step": 3000
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.680000000000001e-05,
      "loss": 0.4488,
      "step": 4000
    },
    {
      "epoch": 2.5,
      "learning_rate": 3.6e-05,
      "loss": 0.3892,
      "step": 5000
    },
    {
      "epoch": 3.0,
      "learning_rate": 3.52e-05,
      "loss": 0.447,
      "step": 6000
    },
    {
      "epoch": 3.5,
      "learning_rate": 3.44e-05,
      "loss": 0.384,
      "step": 7000
    },
    {
      "epoch": 4.0,
      "learning_rate": 3.3600000000000004e-05,
      "loss": 0.346,
      "step": 8000
    },
    {
      "epoch": 4.5,
      "learning_rate": 3.28e-05,
      "loss": 0.3349,
      "step": 9000
    },
    {
      "epoch": 5.0,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.3415,
      "step": 10000
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.2377464771270752,
      "eval_p-value": 3.8666537188825695e-181,
      "eval_runtime": 246.1373,
      "eval_samples_per_second": 16.251,
      "eval_spearman": 0.4315122856249831,
      "eval_steps_per_second": 2.031,
      "step": 10000
    },
    {
      "epoch": 5.5,
      "learning_rate": 3.1200000000000006e-05,
      "loss": 0.316,
      "step": 11000
    },
    {
      "epoch": 6.0,
      "learning_rate": 3.0400000000000004e-05,
      "loss": 0.3282,
      "step": 12000
    },
    {
      "epoch": 6.5,
      "learning_rate": 2.96e-05,
      "loss": 0.2925,
      "step": 13000
    },
    {
      "epoch": 7.0,
      "learning_rate": 2.8800000000000002e-05,
      "loss": 0.2841,
      "step": 14000
    },
    {
      "epoch": 7.5,
      "learning_rate": 2.8e-05,
      "loss": 0.2719,
      "step": 15000
    },
    {
      "epoch": 8.0,
      "learning_rate": 2.7200000000000004e-05,
      "loss": 0.2694,
      "step": 16000
    },
    {
      "epoch": 8.5,
      "learning_rate": 2.6400000000000005e-05,
      "loss": 0.2592,
      "step": 17000
    },
    {
      "epoch": 9.0,
      "learning_rate": 2.5600000000000002e-05,
      "loss": 0.2563,
      "step": 18000
    },
    {
      "epoch": 9.5,
      "learning_rate": 2.4800000000000003e-05,
      "loss": 0.2599,
      "step": 19000
    },
    {
      "epoch": 10.0,
      "learning_rate": 2.4e-05,
      "loss": 0.2412,
      "step": 20000
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.19882595539093018,
      "eval_p-value": 2.0894786180148802e-236,
      "eval_runtime": 237.5854,
      "eval_samples_per_second": 16.836,
      "eval_spearman": 0.4861580881492778,
      "eval_steps_per_second": 2.105,
      "step": 20000
    },
    {
      "epoch": 10.5,
      "learning_rate": 2.32e-05,
      "loss": 0.2347,
      "step": 21000
    },
    {
      "epoch": 11.0,
      "learning_rate": 2.2400000000000002e-05,
      "loss": 0.2332,
      "step": 22000
    },
    {
      "epoch": 11.5,
      "learning_rate": 2.1600000000000003e-05,
      "loss": 0.2299,
      "step": 23000
    },
    {
      "epoch": 12.0,
      "learning_rate": 2.08e-05,
      "loss": 0.2258,
      "step": 24000
    },
    {
      "epoch": 12.5,
      "learning_rate": 2e-05,
      "loss": 0.2211,
      "step": 25000
    },
    {
      "epoch": 13.0,
      "learning_rate": 1.9200000000000003e-05,
      "loss": 0.223,
      "step": 26000
    },
    {
      "epoch": 13.5,
      "learning_rate": 1.8400000000000003e-05,
      "loss": 0.2201,
      "step": 27000
    },
    {
      "epoch": 14.0,
      "learning_rate": 1.76e-05,
      "loss": 0.2131,
      "step": 28000
    },
    {
      "epoch": 14.5,
      "learning_rate": 1.6800000000000002e-05,
      "loss": 0.2131,
      "step": 29000
    },
    {
      "epoch": 15.0,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.2153,
      "step": 30000
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.18797849118709564,
      "eval_p-value": 5.983697306181229e-266,
      "eval_runtime": 248.0218,
      "eval_samples_per_second": 16.128,
      "eval_spearman": 0.511742524682024,
      "eval_steps_per_second": 2.016,
      "step": 30000
    },
    {
      "epoch": 15.5,
      "learning_rate": 1.5200000000000002e-05,
      "loss": 0.4026,
      "step": 31000
    },
    {
      "epoch": 16.0,
      "learning_rate": 1.4400000000000001e-05,
      "loss": 0.2148,
      "step": 32000
    },
    {
      "epoch": 16.5,
      "learning_rate": 1.3600000000000002e-05,
      "loss": 0.208,
      "step": 33000
    },
    {
      "epoch": 17.0,
      "learning_rate": 1.2800000000000001e-05,
      "loss": 0.2121,
      "step": 34000
    },
    {
      "epoch": 17.5,
      "learning_rate": 1.2e-05,
      "loss": 0.2064,
      "step": 35000
    },
    {
      "epoch": 18.0,
      "learning_rate": 1.1200000000000001e-05,
      "loss": 0.2075,
      "step": 36000
    },
    {
      "epoch": 18.5,
      "learning_rate": 1.04e-05,
      "loss": 0.2095,
      "step": 37000
    },
    {
      "epoch": 19.0,
      "learning_rate": 9.600000000000001e-06,
      "loss": 0.2058,
      "step": 38000
    },
    {
      "epoch": 19.5,
      "learning_rate": 8.8e-06,
      "loss": 0.2045,
      "step": 39000
    },
    {
      "epoch": 20.0,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.2054,
      "step": 40000
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.1777072548866272,
      "eval_p-value": 8.897490187001762e-304,
      "eval_runtime": 252.1853,
      "eval_samples_per_second": 15.861,
      "eval_spearman": 0.541600893005368,
      "eval_steps_per_second": 1.983,
      "step": 40000
    },
    {
      "epoch": 20.5,
      "learning_rate": 7.2000000000000005e-06,
      "loss": 0.2044,
      "step": 41000
    },
    {
      "epoch": 21.0,
      "learning_rate": 6.4000000000000006e-06,
      "loss": 0.2003,
      "step": 42000
    },
    {
      "epoch": 21.5,
      "learning_rate": 5.600000000000001e-06,
      "loss": 0.1978,
      "step": 43000
    },
    {
      "epoch": 22.0,
      "learning_rate": 4.800000000000001e-06,
      "loss": 0.2044,
      "step": 44000
    },
    {
      "epoch": 22.5,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.1994,
      "step": 45000
    },
    {
      "epoch": 23.0,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.1972,
      "step": 46000
    },
    {
      "epoch": 23.5,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.2022,
      "step": 47000
    },
    {
      "epoch": 24.0,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.1955,
      "step": 48000
    },
    {
      "epoch": 24.5,
      "learning_rate": 8.000000000000001e-07,
      "loss": 0.1988,
      "step": 49000
    },
    {
      "epoch": 25.0,
      "learning_rate": 0.0,
      "loss": 0.1967,
      "step": 50000
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.17539779841899872,
      "eval_p-value": 0.0,
      "eval_runtime": 247.6044,
      "eval_samples_per_second": 16.155,
      "eval_spearman": 0.5640622093599114,
      "eval_steps_per_second": 2.019,
      "step": 50000
    }
  ],
  "logging_steps": 1000,
  "max_steps": 50000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 25,
  "save_steps": 10000,
  "total_flos": 4.609378615296e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}