{
  "best_metric": 2.143136501312256,
  "best_model_checkpoint": "./outputs/checkpoint-2300",
  "epoch": 1.6757741347905282,
  "eval_steps": 100,
  "global_step": 2300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 2.7198,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 2.6175436973571777,
      "eval_runtime": 206.1344,
      "eval_samples_per_second": 30.436,
      "eval_steps_per_second": 3.808,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 2.5808,
      "step": 200
    },
    {
      "epoch": 0.15,
      "eval_loss": 2.5663881301879883,
      "eval_runtime": 206.2235,
      "eval_samples_per_second": 30.423,
      "eval_steps_per_second": 3.807,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 2.5395,
      "step": 300
    },
    {
      "epoch": 0.22,
      "eval_loss": 2.52937912940979,
      "eval_runtime": 205.8891,
      "eval_samples_per_second": 30.473,
      "eval_steps_per_second": 3.813,
      "step": 300
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 2.5126,
      "step": 400
    },
    {
      "epoch": 0.29,
      "eval_loss": 2.496067762374878,
      "eval_runtime": 206.0161,
      "eval_samples_per_second": 30.454,
      "eval_steps_per_second": 3.81,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 2.4697,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 2.4705872535705566,
      "eval_runtime": 206.051,
      "eval_samples_per_second": 30.449,
      "eval_steps_per_second": 3.81,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 2.445,
      "step": 600
    },
    {
      "epoch": 0.44,
      "eval_loss": 2.445857048034668,
      "eval_runtime": 206.3699,
      "eval_samples_per_second": 30.402,
      "eval_steps_per_second": 3.804,
      "step": 600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 2.4272,
      "step": 700
    },
    {
      "epoch": 0.51,
      "eval_loss": 2.4212183952331543,
      "eval_runtime": 205.7781,
      "eval_samples_per_second": 30.489,
      "eval_steps_per_second": 3.815,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 2.4046,
      "step": 800
    },
    {
      "epoch": 0.58,
      "eval_loss": 2.399564027786255,
      "eval_runtime": 205.9969,
      "eval_samples_per_second": 30.457,
      "eval_steps_per_second": 3.811,
      "step": 800
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0002,
      "loss": 2.3686,
      "step": 900
    },
    {
      "epoch": 0.66,
      "eval_loss": 2.3772318363189697,
      "eval_runtime": 206.4179,
      "eval_samples_per_second": 30.395,
      "eval_steps_per_second": 3.803,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 2.3654,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "eval_loss": 2.3593590259552,
      "eval_runtime": 206.0361,
      "eval_samples_per_second": 30.451,
      "eval_steps_per_second": 3.81,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 2.3585,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "eval_loss": 2.33705472946167,
      "eval_runtime": 206.2411,
      "eval_samples_per_second": 30.421,
      "eval_steps_per_second": 3.806,
      "step": 1100
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002,
      "loss": 2.3156,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "eval_loss": 2.318722724914551,
      "eval_runtime": 205.7818,
      "eval_samples_per_second": 30.489,
      "eval_steps_per_second": 3.815,
      "step": 1200
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 2.3265,
      "step": 1300
    },
    {
      "epoch": 0.95,
      "eval_loss": 2.3025875091552734,
      "eval_runtime": 206.4689,
      "eval_samples_per_second": 30.387,
      "eval_steps_per_second": 3.802,
      "step": 1300
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002,
      "loss": 2.2785,
      "step": 1400
    },
    {
      "epoch": 1.02,
      "eval_loss": 2.2806317806243896,
      "eval_runtime": 206.1627,
      "eval_samples_per_second": 30.432,
      "eval_steps_per_second": 3.808,
      "step": 1400
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002,
      "loss": 2.2404,
      "step": 1500
    },
    {
      "epoch": 1.09,
      "eval_loss": 2.264216899871826,
      "eval_runtime": 206.3537,
      "eval_samples_per_second": 30.404,
      "eval_steps_per_second": 3.804,
      "step": 1500
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0002,
      "loss": 2.232,
      "step": 1600
    },
    {
      "epoch": 1.17,
      "eval_loss": 2.2468743324279785,
      "eval_runtime": 206.622,
      "eval_samples_per_second": 30.365,
      "eval_steps_per_second": 3.799,
      "step": 1600
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0002,
      "loss": 2.2326,
      "step": 1700
    },
    {
      "epoch": 1.24,
      "eval_loss": 2.231027126312256,
      "eval_runtime": 206.9085,
      "eval_samples_per_second": 30.323,
      "eval_steps_per_second": 3.794,
      "step": 1700
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 2.1938,
      "step": 1800
    },
    {
      "epoch": 1.31,
      "eval_loss": 2.214425563812256,
      "eval_runtime": 294.4379,
      "eval_samples_per_second": 21.308,
      "eval_steps_per_second": 2.666,
      "step": 1800
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002,
      "loss": 2.1771,
      "step": 1900
    },
    {
      "epoch": 1.38,
      "eval_loss": 2.1998817920684814,
      "eval_runtime": 206.8568,
      "eval_samples_per_second": 30.33,
      "eval_steps_per_second": 3.795,
      "step": 1900
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0002,
      "loss": 2.1718,
      "step": 2000
    },
    {
      "epoch": 1.46,
      "eval_loss": 2.185276985168457,
      "eval_runtime": 206.8061,
      "eval_samples_per_second": 30.338,
      "eval_steps_per_second": 3.796,
      "step": 2000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0002,
      "loss": 2.1536,
      "step": 2100
    },
    {
      "epoch": 1.53,
      "eval_loss": 2.173062562942505,
      "eval_runtime": 207.2032,
      "eval_samples_per_second": 30.279,
      "eval_steps_per_second": 3.789,
      "step": 2100
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0002,
      "loss": 2.1373,
      "step": 2200
    },
    {
      "epoch": 1.6,
      "eval_loss": 2.155045986175537,
      "eval_runtime": 206.5769,
      "eval_samples_per_second": 30.371,
      "eval_steps_per_second": 3.8,
      "step": 2200
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002,
      "loss": 2.1267,
      "step": 2300
    },
    {
      "epoch": 1.68,
      "eval_loss": 2.143136501312256,
      "eval_runtime": 206.6375,
      "eval_samples_per_second": 30.362,
      "eval_steps_per_second": 3.799,
      "step": 2300
    }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 6.762876423432192e+16,
  "trial_name": null,
  "trial_params": null
}