{
  "best_metric": 1.9584715366363525,
  "best_model_checkpoint": "./outputs/checkpoint-4100",
  "epoch": 2.987249544626594,
  "eval_steps": 100,
  "global_step": 4100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 2.7198,
      "step": 100
    },
    {
      "epoch": 0.07,
      "eval_loss": 2.6175436973571777,
      "eval_runtime": 206.1344,
      "eval_samples_per_second": 30.436,
      "eval_steps_per_second": 3.808,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 2.5808,
      "step": 200
    },
    {
      "epoch": 0.15,
      "eval_loss": 2.5663881301879883,
      "eval_runtime": 206.2235,
      "eval_samples_per_second": 30.423,
      "eval_steps_per_second": 3.807,
      "step": 200
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 2.5395,
      "step": 300
    },
    {
      "epoch": 0.22,
      "eval_loss": 2.52937912940979,
      "eval_runtime": 205.8891,
      "eval_samples_per_second": 30.473,
      "eval_steps_per_second": 3.813,
      "step": 300
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 2.5126,
      "step": 400
    },
    {
      "epoch": 0.29,
      "eval_loss": 2.496067762374878,
      "eval_runtime": 206.0161,
      "eval_samples_per_second": 30.454,
      "eval_steps_per_second": 3.81,
      "step": 400
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 2.4697,
      "step": 500
    },
    {
      "epoch": 0.36,
      "eval_loss": 2.4705872535705566,
      "eval_runtime": 206.051,
      "eval_samples_per_second": 30.449,
      "eval_steps_per_second": 3.81,
      "step": 500
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 2.445,
      "step": 600
    },
    {
      "epoch": 0.44,
      "eval_loss": 2.445857048034668,
      "eval_runtime": 206.3699,
      "eval_samples_per_second": 30.402,
      "eval_steps_per_second": 3.804,
      "step": 600
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 2.4272,
      "step": 700
    },
    {
      "epoch": 0.51,
      "eval_loss": 2.4212183952331543,
      "eval_runtime": 205.7781,
      "eval_samples_per_second": 30.489,
      "eval_steps_per_second": 3.815,
      "step": 700
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 2.4046,
      "step": 800
    },
    {
      "epoch": 0.58,
      "eval_loss": 2.399564027786255,
      "eval_runtime": 205.9969,
      "eval_samples_per_second": 30.457,
      "eval_steps_per_second": 3.811,
      "step": 800
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0002,
      "loss": 2.3686,
      "step": 900
    },
    {
      "epoch": 0.66,
      "eval_loss": 2.3772318363189697,
      "eval_runtime": 206.4179,
      "eval_samples_per_second": 30.395,
      "eval_steps_per_second": 3.803,
      "step": 900
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 2.3654,
      "step": 1000
    },
    {
      "epoch": 0.73,
      "eval_loss": 2.3593590259552,
      "eval_runtime": 206.0361,
      "eval_samples_per_second": 30.451,
      "eval_steps_per_second": 3.81,
      "step": 1000
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 2.3585,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "eval_loss": 2.33705472946167,
      "eval_runtime": 206.2411,
      "eval_samples_per_second": 30.421,
      "eval_steps_per_second": 3.806,
      "step": 1100
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0002,
      "loss": 2.3156,
      "step": 1200
    },
    {
      "epoch": 0.87,
      "eval_loss": 2.318722724914551,
      "eval_runtime": 205.7818,
      "eval_samples_per_second": 30.489,
      "eval_steps_per_second": 3.815,
      "step": 1200
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.0002,
      "loss": 2.3265,
      "step": 1300
    },
    {
      "epoch": 0.95,
      "eval_loss": 2.3025875091552734,
      "eval_runtime": 206.4689,
      "eval_samples_per_second": 30.387,
      "eval_steps_per_second": 3.802,
      "step": 1300
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.0002,
      "loss": 2.2785,
      "step": 1400
    },
    {
      "epoch": 1.02,
      "eval_loss": 2.2806317806243896,
      "eval_runtime": 206.1627,
      "eval_samples_per_second": 30.432,
      "eval_steps_per_second": 3.808,
      "step": 1400
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002,
      "loss": 2.2404,
      "step": 1500
    },
    {
      "epoch": 1.09,
      "eval_loss": 2.264216899871826,
      "eval_runtime": 206.3537,
      "eval_samples_per_second": 30.404,
      "eval_steps_per_second": 3.804,
      "step": 1500
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0002,
      "loss": 2.232,
      "step": 1600
    },
    {
      "epoch": 1.17,
      "eval_loss": 2.2468743324279785,
      "eval_runtime": 206.622,
      "eval_samples_per_second": 30.365,
      "eval_steps_per_second": 3.799,
      "step": 1600
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0002,
      "loss": 2.2326,
      "step": 1700
    },
    {
      "epoch": 1.24,
      "eval_loss": 2.231027126312256,
      "eval_runtime": 206.9085,
      "eval_samples_per_second": 30.323,
      "eval_steps_per_second": 3.794,
      "step": 1700
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.0002,
      "loss": 2.1938,
      "step": 1800
    },
    {
      "epoch": 1.31,
      "eval_loss": 2.214425563812256,
      "eval_runtime": 294.4379,
      "eval_samples_per_second": 21.308,
      "eval_steps_per_second": 2.666,
      "step": 1800
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0002,
      "loss": 2.1771,
      "step": 1900
    },
    {
      "epoch": 1.38,
      "eval_loss": 2.1998817920684814,
      "eval_runtime": 206.8568,
      "eval_samples_per_second": 30.33,
      "eval_steps_per_second": 3.795,
      "step": 1900
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0002,
      "loss": 2.1718,
      "step": 2000
    },
    {
      "epoch": 1.46,
      "eval_loss": 2.185276985168457,
      "eval_runtime": 206.8061,
      "eval_samples_per_second": 30.338,
      "eval_steps_per_second": 3.796,
      "step": 2000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0002,
      "loss": 2.1536,
      "step": 2100
    },
    {
      "epoch": 1.53,
      "eval_loss": 2.173062562942505,
      "eval_runtime": 207.2032,
      "eval_samples_per_second": 30.279,
      "eval_steps_per_second": 3.789,
      "step": 2100
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.0002,
      "loss": 2.1373,
      "step": 2200
    },
    {
      "epoch": 1.6,
      "eval_loss": 2.155045986175537,
      "eval_runtime": 206.5769,
      "eval_samples_per_second": 30.371,
      "eval_steps_per_second": 3.8,
      "step": 2200
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0002,
      "loss": 2.1267,
      "step": 2300
    },
    {
      "epoch": 1.68,
      "eval_loss": 2.143136501312256,
      "eval_runtime": 206.6375,
      "eval_samples_per_second": 30.362,
      "eval_steps_per_second": 3.799,
      "step": 2300
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.0002,
      "loss": 2.1334,
      "step": 2400
    },
    {
      "epoch": 1.75,
      "eval_loss": 2.130755662918091,
      "eval_runtime": 206.6096,
      "eval_samples_per_second": 30.366,
      "eval_steps_per_second": 3.799,
      "step": 2400
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.0002,
      "loss": 2.101,
      "step": 2500
    },
    {
      "epoch": 1.82,
      "eval_loss": 2.1189475059509277,
      "eval_runtime": 218.5869,
      "eval_samples_per_second": 28.703,
      "eval_steps_per_second": 3.591,
      "step": 2500
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.0002,
      "loss": 2.1023,
      "step": 2600
    },
    {
      "epoch": 1.89,
      "eval_loss": 2.1056134700775146,
      "eval_runtime": 206.4995,
      "eval_samples_per_second": 30.383,
      "eval_steps_per_second": 3.801,
      "step": 2600
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.0002,
      "loss": 2.0858,
      "step": 2700
    },
    {
      "epoch": 1.97,
      "eval_loss": 2.091339588165283,
      "eval_runtime": 350.1545,
      "eval_samples_per_second": 17.918,
      "eval_steps_per_second": 2.242,
      "step": 2700
    },
    {
      "epoch": 2.04,
      "learning_rate": 0.0002,
      "loss": 2.0422,
      "step": 2800
    },
    {
      "epoch": 2.04,
      "eval_loss": 2.0826971530914307,
      "eval_runtime": 350.5431,
      "eval_samples_per_second": 17.898,
      "eval_steps_per_second": 2.239,
      "step": 2800
    },
    {
      "epoch": 2.11,
      "learning_rate": 0.0002,
      "loss": 2.013,
      "step": 2900
    },
    {
      "epoch": 2.11,
      "eval_loss": 2.070052146911621,
      "eval_runtime": 417.9713,
      "eval_samples_per_second": 15.011,
      "eval_steps_per_second": 1.878,
      "step": 2900
    },
    {
      "epoch": 2.19,
      "learning_rate": 0.0002,
      "loss": 2.0363,
      "step": 3000
    },
    {
      "epoch": 2.19,
      "eval_loss": 2.062880277633667,
      "eval_runtime": 324.7358,
      "eval_samples_per_second": 19.32,
      "eval_steps_per_second": 2.417,
      "step": 3000
    },
    {
      "epoch": 2.26,
      "learning_rate": 0.0002,
      "loss": 2.0072,
      "step": 3100
    },
    {
      "epoch": 2.26,
      "eval_loss": 2.051217794418335,
      "eval_runtime": 269.9913,
      "eval_samples_per_second": 23.238,
      "eval_steps_per_second": 2.908,
      "step": 3100
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.0002,
      "loss": 2.02,
      "step": 3200
    },
    {
      "epoch": 2.33,
      "eval_loss": 2.039947509765625,
      "eval_runtime": 354.1313,
      "eval_samples_per_second": 17.717,
      "eval_steps_per_second": 2.217,
      "step": 3200
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.0002,
      "loss": 1.9872,
      "step": 3300
    },
    {
      "epoch": 2.4,
      "eval_loss": 2.029729127883911,
      "eval_runtime": 353.5701,
      "eval_samples_per_second": 17.745,
      "eval_steps_per_second": 2.22,
      "step": 3300
    },
    {
      "epoch": 2.48,
      "learning_rate": 0.0002,
      "loss": 1.9815,
      "step": 3400
    },
    {
      "epoch": 2.48,
      "eval_loss": 2.0244295597076416,
      "eval_runtime": 360.2966,
      "eval_samples_per_second": 17.413,
      "eval_steps_per_second": 2.179,
      "step": 3400
    },
    {
      "epoch": 2.55,
      "learning_rate": 0.0002,
      "loss": 1.9772,
      "step": 3500
    },
    {
      "epoch": 2.55,
      "eval_loss": 2.010683536529541,
      "eval_runtime": 353.864,
      "eval_samples_per_second": 17.73,
      "eval_steps_per_second": 2.218,
      "step": 3500
    },
    {
      "epoch": 2.62,
      "learning_rate": 0.0002,
      "loss": 1.9742,
      "step": 3600
    },
    {
      "epoch": 2.62,
      "eval_loss": 2.001563310623169,
      "eval_runtime": 352.348,
      "eval_samples_per_second": 17.806,
      "eval_steps_per_second": 2.228,
      "step": 3600
    },
    {
      "epoch": 2.7,
      "learning_rate": 0.0002,
      "loss": 1.9551,
      "step": 3700
    },
    {
      "epoch": 2.7,
      "eval_loss": 1.9941232204437256,
      "eval_runtime": 353.227,
      "eval_samples_per_second": 17.762,
      "eval_steps_per_second": 2.222,
      "step": 3700
    },
    {
      "epoch": 2.77,
      "learning_rate": 0.0002,
      "loss": 1.9469,
      "step": 3800
    },
    {
      "epoch": 2.77,
      "eval_loss": 1.9865385293960571,
      "eval_runtime": 353.5789,
      "eval_samples_per_second": 17.744,
      "eval_steps_per_second": 2.22,
      "step": 3800
    },
    {
      "epoch": 2.84,
      "learning_rate": 0.0002,
      "loss": 1.9346,
      "step": 3900
    },
    {
      "epoch": 2.84,
      "eval_loss": 1.9748302698135376,
      "eval_runtime": 353.4471,
      "eval_samples_per_second": 17.751,
      "eval_steps_per_second": 2.221,
      "step": 3900
    },
    {
      "epoch": 2.91,
      "learning_rate": 0.0002,
      "loss": 1.937,
      "step": 4000
    },
    {
      "epoch": 2.91,
      "eval_loss": 1.9734764099121094,
      "eval_runtime": 353.802,
      "eval_samples_per_second": 17.733,
      "eval_steps_per_second": 2.219,
      "step": 4000
    },
    {
      "epoch": 2.99,
      "learning_rate": 0.0002,
      "loss": 1.9379,
      "step": 4100
    },
    {
      "epoch": 2.99,
      "eval_loss": 1.9584715366363525,
      "eval_runtime": 354.4627,
      "eval_samples_per_second": 17.7,
      "eval_steps_per_second": 2.215,
      "step": 4100
    }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 1.2037527679500288e+17,
  "trial_name": null,
  "trial_params": null
}