{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 19.999384615384617,
  "global_step": 8120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_loss": 0.23661810159683228,
      "eval_mse": 0.23661810159683228,
      "eval_runtime": 2.3458,
      "eval_samples_per_second": 213.15,
      "eval_steps_per_second": 26.857,
      "step": 406
    },
    {
      "epoch": 1.23,
      "learning_rate": 2.8341964112881786e-05,
      "loss": 0.5408,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.174851194024086,
      "eval_mse": 0.1748511791229248,
      "eval_runtime": 2.4627,
      "eval_samples_per_second": 203.028,
      "eval_steps_per_second": 25.582,
      "step": 812
    },
    {
      "epoch": 2.46,
      "learning_rate": 2.648225518159033e-05,
      "loss": 0.1601,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.16912883520126343,
      "eval_mse": 0.16912883520126343,
      "eval_runtime": 2.4762,
      "eval_samples_per_second": 201.92,
      "eval_steps_per_second": 25.442,
      "step": 1218
    },
    {
      "epoch": 3.69,
      "learning_rate": 2.4622546250298872e-05,
      "loss": 0.1051,
      "step": 1500
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.15406909584999084,
      "eval_mse": 0.15406909584999084,
      "eval_runtime": 2.4694,
      "eval_samples_per_second": 202.478,
      "eval_steps_per_second": 25.512,
      "step": 1624
    },
    {
      "epoch": 4.93,
      "learning_rate": 2.2762837319007417e-05,
      "loss": 0.0784,
      "step": 2000
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.16454747319221497,
      "eval_mse": 0.16454748809337616,
      "eval_runtime": 2.4788,
      "eval_samples_per_second": 201.707,
      "eval_steps_per_second": 25.415,
      "step": 2030
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.13350126147270203,
      "eval_mse": 0.13350127637386322,
      "eval_runtime": 2.3387,
      "eval_samples_per_second": 213.798,
      "eval_steps_per_second": 26.939,
      "step": 2436
    },
    {
      "epoch": 6.16,
      "learning_rate": 2.0903128387715962e-05,
      "loss": 0.0615,
      "step": 2500
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.11380095779895782,
      "eval_mse": 0.11380096524953842,
      "eval_runtime": 2.4535,
      "eval_samples_per_second": 203.787,
      "eval_steps_per_second": 25.677,
      "step": 2842
    },
    {
      "epoch": 7.39,
      "learning_rate": 1.9043419456424507e-05,
      "loss": 0.0486,
      "step": 3000
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.11023622751235962,
      "eval_mse": 0.11023622006177902,
      "eval_runtime": 2.3622,
      "eval_samples_per_second": 211.671,
      "eval_steps_per_second": 26.671,
      "step": 3248
    },
    {
      "epoch": 8.62,
      "learning_rate": 1.718371052513305e-05,
      "loss": 0.04,
      "step": 3500
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.13071884214878082,
      "eval_mse": 0.13071882724761963,
      "eval_runtime": 2.3355,
      "eval_samples_per_second": 214.09,
      "eval_steps_per_second": 26.975,
      "step": 3654
    },
    {
      "epoch": 9.85,
      "learning_rate": 1.5324001593841596e-05,
      "loss": 0.0333,
      "step": 4000
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.11540825664997101,
      "eval_mse": 0.11540825664997101,
      "eval_runtime": 2.3363,
      "eval_samples_per_second": 214.015,
      "eval_steps_per_second": 26.966,
      "step": 4060
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.08316297829151154,
      "eval_mse": 0.08316297829151154,
      "eval_runtime": 2.3934,
      "eval_samples_per_second": 208.905,
      "eval_steps_per_second": 26.322,
      "step": 4466
    },
    {
      "epoch": 11.08,
      "learning_rate": 1.346429266255014e-05,
      "loss": 0.0303,
      "step": 4500
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.090473473072052,
      "eval_mse": 0.090473473072052,
      "eval_runtime": 2.48,
      "eval_samples_per_second": 201.616,
      "eval_steps_per_second": 25.404,
      "step": 4872
    },
    {
      "epoch": 12.32,
      "learning_rate": 1.1604583731258682e-05,
      "loss": 0.0243,
      "step": 5000
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.08971528708934784,
      "eval_mse": 0.08971529453992844,
      "eval_runtime": 2.3125,
      "eval_samples_per_second": 216.213,
      "eval_steps_per_second": 27.243,
      "step": 5278
    },
    {
      "epoch": 13.55,
      "learning_rate": 9.744874799967227e-06,
      "loss": 0.0222,
      "step": 5500
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.08441515266895294,
      "eval_mse": 0.08441514521837234,
      "eval_runtime": 2.4412,
      "eval_samples_per_second": 204.815,
      "eval_steps_per_second": 25.807,
      "step": 5684
    },
    {
      "epoch": 14.78,
      "learning_rate": 7.885165868675772e-06,
      "loss": 0.0194,
      "step": 6000
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.08390124142169952,
      "eval_mse": 0.08390124142169952,
      "eval_runtime": 2.5105,
      "eval_samples_per_second": 199.165,
      "eval_steps_per_second": 25.095,
      "step": 6090
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.07694818079471588,
      "eval_mse": 0.07694818079471588,
      "eval_runtime": 2.4507,
      "eval_samples_per_second": 204.025,
      "eval_steps_per_second": 25.707,
      "step": 6496
    },
    {
      "epoch": 16.01,
      "learning_rate": 6.025456937384317e-06,
      "loss": 0.0173,
      "step": 6500
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.07610571384429932,
      "eval_mse": 0.07610571384429932,
      "eval_runtime": 2.3078,
      "eval_samples_per_second": 216.656,
      "eval_steps_per_second": 27.299,
      "step": 6902
    },
    {
      "epoch": 17.24,
      "learning_rate": 4.165748006092861e-06,
      "loss": 0.0156,
      "step": 7000
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.06984403729438782,
      "eval_mse": 0.06984403729438782,
      "eval_runtime": 2.3322,
      "eval_samples_per_second": 214.388,
      "eval_steps_per_second": 27.013,
      "step": 7308
    },
    {
      "epoch": 18.47,
      "learning_rate": 2.306039074801405e-06,
      "loss": 0.0138,
      "step": 7500
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.07631801068782806,
      "eval_mse": 0.07631801813840866,
      "eval_runtime": 2.4913,
      "eval_samples_per_second": 200.701,
      "eval_steps_per_second": 25.288,
      "step": 7714
    },
    {
      "epoch": 19.7,
      "learning_rate": 4.4633014350994935e-07,
      "loss": 0.0128,
      "step": 8000
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.07459883391857147,
      "eval_mse": 0.07459883391857147,
      "eval_runtime": 2.3857,
      "eval_samples_per_second": 209.585,
      "eval_steps_per_second": 26.408,
      "step": 8120
    },
    {
      "epoch": 20.0,
      "step": 8120,
      "total_flos": 8550769413547008.0,
      "train_loss": 0.07552281259903179,
      "train_runtime": 3008.7042,
      "train_samples_per_second": 43.208,
      "train_steps_per_second": 2.699
    }
  ],
  "max_steps": 8120,
  "num_train_epochs": 20,
  "total_flos": 8550769413547008.0,
  "trial_name": null,
  "trial_params": null
}