{
  "best_metric": 1.3940773010253906,
  "best_model_checkpoint": "ner-bert-ingredientstesting/checkpoint-18000",
  "epoch": 49.999663186258,
  "eval_steps": 750,
  "global_step": 37100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "step": 742,
      "total_flos": 3.211629347340288e+16,
      "train_loss": 5.698867345434636,
      "train_runtime": 6114.1675,
      "train_samples_per_second": 15.538,
      "train_steps_per_second": 0.121
    },
    {
      "epoch": 1.01,
      "learning_rate": 4.9989218328840974e-05,
      "loss": 10.3965,
      "step": 750
    },
    {
      "epoch": 1.01,
      "eval_loss": 9.694682121276855,
      "eval_runtime": 84.6491,
      "eval_samples_per_second": 59.067,
      "eval_steps_per_second": 0.933,
      "step": 750
    },
    {
      "epoch": 2.02,
      "learning_rate": 4.897978436657682e-05,
      "loss": 4.9343,
      "step": 1500
    },
    {
      "epoch": 2.02,
      "eval_loss": 3.2859976291656494,
      "eval_runtime": 84.3631,
      "eval_samples_per_second": 59.268,
      "eval_steps_per_second": 0.936,
      "step": 1500
    },
    {
      "epoch": 3.03,
      "learning_rate": 4.796900269541779e-05,
      "loss": 2.8798,
      "step": 2250
    },
    {
      "epoch": 3.03,
      "eval_loss": 2.4815261363983154,
      "eval_runtime": 84.3896,
      "eval_samples_per_second": 59.249,
      "eval_steps_per_second": 0.936,
      "step": 2250
    },
    {
      "epoch": 4.04,
      "learning_rate": 4.6958221024258764e-05,
      "loss": 2.1885,
      "step": 3000
    },
    {
      "epoch": 4.04,
      "eval_loss": 1.9960429668426514,
      "eval_runtime": 84.3112,
      "eval_samples_per_second": 59.304,
      "eval_steps_per_second": 0.937,
      "step": 3000
    },
    {
      "epoch": 5.05,
      "learning_rate": 4.594743935309973e-05,
      "loss": 1.7594,
      "step": 3750
    },
    {
      "epoch": 5.05,
      "eval_loss": 1.7031219005584717,
      "eval_runtime": 84.3933,
      "eval_samples_per_second": 59.246,
      "eval_steps_per_second": 0.936,
      "step": 3750
    },
    {
      "epoch": 6.06,
      "learning_rate": 4.4936657681940705e-05,
      "loss": 1.4881,
      "step": 4500
    },
    {
      "epoch": 6.06,
      "eval_loss": 1.558716893196106,
      "eval_runtime": 84.4241,
      "eval_samples_per_second": 59.225,
      "eval_steps_per_second": 0.936,
      "step": 4500
    },
    {
      "epoch": 7.07,
      "learning_rate": 4.392587601078167e-05,
      "loss": 1.2897,
      "step": 5250
    },
    {
      "epoch": 7.07,
      "eval_loss": 1.5603703260421753,
      "eval_runtime": 84.4569,
      "eval_samples_per_second": 59.202,
      "eval_steps_per_second": 0.935,
      "step": 5250
    },
    {
      "epoch": 8.09,
      "learning_rate": 4.8989218328840976e-05,
      "loss": 3.6553,
      "step": 6000
    },
    {
      "epoch": 8.09,
      "eval_loss": 2.3138411045074463,
      "eval_runtime": 85.1227,
      "eval_samples_per_second": 58.739,
      "eval_steps_per_second": 0.928,
      "step": 6000
    },
    {
      "epoch": 9.1,
      "learning_rate": 4.7979784366576826e-05,
      "loss": 1.8488,
      "step": 6750
    },
    {
      "epoch": 9.1,
      "eval_loss": 1.7765036821365356,
      "eval_runtime": 84.6118,
      "eval_samples_per_second": 59.093,
      "eval_steps_per_second": 0.934,
      "step": 6750
    },
    {
      "epoch": 10.11,
      "learning_rate": 4.696900269541779e-05,
      "loss": 1.4372,
      "step": 7500
    },
    {
      "epoch": 10.11,
      "eval_loss": 1.5598968267440796,
      "eval_runtime": 84.6222,
      "eval_samples_per_second": 59.086,
      "eval_steps_per_second": 0.934,
      "step": 7500
    },
    {
      "epoch": 11.12,
      "learning_rate": 4.595822102425876e-05,
      "loss": 1.2011,
      "step": 8250
    },
    {
      "epoch": 11.12,
      "eval_loss": 1.4957914352416992,
      "eval_runtime": 84.6871,
      "eval_samples_per_second": 59.041,
      "eval_steps_per_second": 0.933,
      "step": 8250
    },
    {
      "epoch": 12.13,
      "learning_rate": 4.4947439353099734e-05,
      "loss": 1.0493,
      "step": 9000
    },
    {
      "epoch": 12.13,
      "eval_loss": 1.4617092609405518,
      "eval_runtime": 84.4178,
      "eval_samples_per_second": 59.229,
      "eval_steps_per_second": 0.936,
      "step": 9000
    },
    {
      "epoch": 13.14,
      "learning_rate": 4.39366576819407e-05,
      "loss": 0.922,
      "step": 9750
    },
    {
      "epoch": 13.14,
      "eval_loss": 1.3600549697875977,
      "eval_runtime": 84.2691,
      "eval_samples_per_second": 59.334,
      "eval_steps_per_second": 0.937,
      "step": 9750
    },
    {
      "epoch": 14.15,
      "learning_rate": 4.949528301886793e-05,
      "loss": 3.0742,
      "step": 10500
    },
    {
      "epoch": 14.15,
      "eval_loss": 2.005005359649658,
      "eval_runtime": 84.8703,
      "eval_samples_per_second": 58.913,
      "eval_steps_per_second": 0.931,
      "step": 10500
    },
    {
      "epoch": 15.16,
      "learning_rate": 4.898989218328841e-05,
      "loss": 1.4983,
      "step": 11250
    },
    {
      "epoch": 15.16,
      "eval_loss": 1.668813705444336,
      "eval_runtime": 84.5947,
      "eval_samples_per_second": 59.105,
      "eval_steps_per_second": 0.934,
      "step": 11250
    },
    {
      "epoch": 16.17,
      "learning_rate": 4.84845013477089e-05,
      "loss": 1.1598,
      "step": 12000
    },
    {
      "epoch": 16.17,
      "eval_loss": 1.5255405902862549,
      "eval_runtime": 84.5763,
      "eval_samples_per_second": 59.118,
      "eval_steps_per_second": 0.934,
      "step": 12000
    },
    {
      "epoch": 17.18,
      "learning_rate": 4.7979110512129385e-05,
      "loss": 0.9732,
      "step": 12750
    },
    {
      "epoch": 17.18,
      "eval_loss": 1.4376968145370483,
      "eval_runtime": 84.6742,
      "eval_samples_per_second": 59.05,
      "eval_steps_per_second": 0.933,
      "step": 12750
    },
    {
      "epoch": 18.19,
      "learning_rate": 4.7473719676549865e-05,
      "loss": 0.8466,
      "step": 13500
    },
    {
      "epoch": 18.19,
      "eval_loss": 1.4253113269805908,
      "eval_runtime": 84.5693,
      "eval_samples_per_second": 59.123,
      "eval_steps_per_second": 0.934,
      "step": 13500
    },
    {
      "epoch": 19.2,
      "learning_rate": 4.696900269541779e-05,
      "loss": 0.7497,
      "step": 14250
    },
    {
      "epoch": 19.2,
      "eval_loss": 1.3664312362670898,
      "eval_runtime": 84.6297,
      "eval_samples_per_second": 59.081,
      "eval_steps_per_second": 0.933,
      "step": 14250
    },
    {
      "epoch": 20.22,
      "learning_rate": 4.899056603773585e-05,
      "loss": 2.6343,
      "step": 15000
    },
    {
      "epoch": 20.22,
      "eval_loss": 1.8943818807601929,
      "eval_runtime": 84.158,
      "eval_samples_per_second": 59.412,
      "eval_steps_per_second": 0.939,
      "step": 15000
    },
    {
      "epoch": 21.23,
      "learning_rate": 4.7979784366576826e-05,
      "loss": 1.2528,
      "step": 15750
    },
    {
      "epoch": 21.23,
      "eval_loss": 1.6253798007965088,
      "eval_runtime": 83.8622,
      "eval_samples_per_second": 59.622,
      "eval_steps_per_second": 0.942,
      "step": 15750
    },
    {
      "epoch": 22.24,
      "learning_rate": 4.696900269541779e-05,
      "loss": 0.9735,
      "step": 16500
    },
    {
      "epoch": 22.24,
      "eval_loss": 1.4834989309310913,
      "eval_runtime": 83.8392,
      "eval_samples_per_second": 59.638,
      "eval_steps_per_second": 0.942,
      "step": 16500
    },
    {
      "epoch": 23.25,
      "learning_rate": 4.595822102425876e-05,
      "loss": 0.8205,
      "step": 17250
    },
    {
      "epoch": 23.25,
      "eval_loss": 1.4433954954147339,
      "eval_runtime": 83.8368,
      "eval_samples_per_second": 59.64,
      "eval_steps_per_second": 0.942,
      "step": 17250
    },
    {
      "epoch": 24.26,
      "learning_rate": 4.4947439353099734e-05,
      "loss": 0.7195,
      "step": 18000
    },
    {
      "epoch": 24.26,
      "eval_loss": 1.3940773010253906,
      "eval_runtime": 83.833,
      "eval_samples_per_second": 59.642,
      "eval_steps_per_second": 0.942,
      "step": 18000
    },
    {
      "epoch": 25.27,
      "learning_rate": 4.3938005390835576e-05,
      "loss": 0.633,
      "step": 18750
    },
    {
      "epoch": 25.27,
      "eval_loss": 1.443783164024353,
      "eval_runtime": 83.8619,
      "eval_samples_per_second": 59.622,
      "eval_steps_per_second": 0.942,
      "step": 18750
    },
    {
      "epoch": 26.28,
      "learning_rate": 4.899056603773585e-05,
      "loss": 2.3512,
      "step": 19500
    },
    {
      "epoch": 26.28,
      "eval_loss": 1.8179031610488892,
      "eval_runtime": 84.7172,
      "eval_samples_per_second": 59.02,
      "eval_steps_per_second": 0.933,
      "step": 19500
    },
    {
      "epoch": 27.29,
      "learning_rate": 4.7979784366576826e-05,
      "loss": 1.1042,
      "step": 20250
    },
    {
      "epoch": 27.29,
      "eval_loss": 1.5888350009918213,
      "eval_runtime": 84.3483,
      "eval_samples_per_second": 59.278,
      "eval_steps_per_second": 0.937,
      "step": 20250
    },
    {
      "epoch": 28.3,
      "learning_rate": 4.696900269541779e-05,
      "loss": 0.8597,
      "step": 21000
    },
    {
      "epoch": 28.3,
      "eval_loss": 1.5137015581130981,
      "eval_runtime": 84.2921,
      "eval_samples_per_second": 59.318,
      "eval_steps_per_second": 0.937,
      "step": 21000
    },
    {
      "epoch": 29.31,
      "learning_rate": 4.595822102425876e-05,
      "loss": 0.7273,
      "step": 21750
    },
    {
      "epoch": 29.31,
      "eval_loss": 1.4939533472061157,
      "eval_runtime": 84.3697,
      "eval_samples_per_second": 59.263,
      "eval_steps_per_second": 0.936,
      "step": 21750
    },
    {
      "epoch": 30.32,
      "learning_rate": 4.4948787061994616e-05,
      "loss": 0.6283,
      "step": 22500
    },
    {
      "epoch": 30.32,
      "eval_loss": 1.4074889421463013,
      "eval_runtime": 84.3283,
      "eval_samples_per_second": 59.292,
      "eval_steps_per_second": 0.937,
      "step": 22500
    },
    {
      "epoch": 31.33,
      "learning_rate": 4.3939353099730465e-05,
      "loss": 0.5533,
      "step": 23250
    },
    {
      "epoch": 31.33,
      "eval_loss": 1.4201693534851074,
      "eval_runtime": 84.3754,
      "eval_samples_per_second": 59.259,
      "eval_steps_per_second": 0.936,
      "step": 23250
    },
    {
      "epoch": 32.34,
      "learning_rate": 4.899056603773585e-05,
      "loss": 2.2264,
      "step": 24000
    },
    {
      "epoch": 32.34,
      "eval_loss": 1.830324649810791,
      "eval_runtime": 84.4012,
      "eval_samples_per_second": 59.241,
      "eval_steps_per_second": 0.936,
      "step": 24000
    },
    {
      "epoch": 33.36,
      "learning_rate": 4.7979784366576826e-05,
      "loss": 1.0061,
      "step": 24750
    },
    {
      "epoch": 33.36,
      "eval_loss": 1.585161566734314,
      "eval_runtime": 84.0634,
      "eval_samples_per_second": 59.479,
      "eval_steps_per_second": 0.94,
      "step": 24750
    },
    {
      "epoch": 34.37,
      "learning_rate": 4.696900269541779e-05,
      "loss": 0.7859,
      "step": 25500
    },
    {
      "epoch": 34.37,
      "eval_loss": 1.5000197887420654,
      "eval_runtime": 83.979,
      "eval_samples_per_second": 59.539,
      "eval_steps_per_second": 0.941,
      "step": 25500
    },
    {
      "epoch": 35.38,
      "learning_rate": 4.595956873315364e-05,
      "loss": 0.6611,
      "step": 26250
    },
    {
      "epoch": 35.38,
      "eval_loss": 1.4847756624221802,
      "eval_runtime": 84.066,
      "eval_samples_per_second": 59.477,
      "eval_steps_per_second": 0.94,
      "step": 26250
    },
    {
      "epoch": 36.39,
      "learning_rate": 4.4948787061994616e-05,
      "loss": 0.571,
      "step": 27000
    },
    {
      "epoch": 36.39,
      "eval_loss": 1.4602794647216797,
      "eval_runtime": 84.1634,
      "eval_samples_per_second": 59.408,
      "eval_steps_per_second": 0.939,
      "step": 27000
    },
    {
      "epoch": 37.4,
      "learning_rate": 4.3938005390835576e-05,
      "loss": 0.5044,
      "step": 27750
    },
    {
      "epoch": 37.4,
      "eval_loss": 1.4159477949142456,
      "eval_runtime": 84.0529,
      "eval_samples_per_second": 59.486,
      "eval_steps_per_second": 0.94,
      "step": 27750
    },
    {
      "epoch": 38.41,
      "learning_rate": 4.899056603773585e-05,
      "loss": 2.0783,
      "step": 28500
    },
    {
      "epoch": 38.41,
      "eval_loss": 1.7679896354675293,
      "eval_runtime": 85.4608,
      "eval_samples_per_second": 58.506,
      "eval_steps_per_second": 0.924,
      "step": 28500
    },
    {
      "epoch": 39.42,
      "learning_rate": 4.7979784366576826e-05,
      "loss": 0.9177,
      "step": 29250
    },
    {
      "epoch": 39.42,
      "eval_loss": 1.6010451316833496,
      "eval_runtime": 85.1034,
      "eval_samples_per_second": 58.752,
      "eval_steps_per_second": 0.928,
      "step": 29250
    },
    {
      "epoch": 40.43,
      "learning_rate": 4.696900269541779e-05,
      "loss": 0.7137,
      "step": 30000
    },
    {
      "epoch": 40.43,
      "eval_loss": 1.4925891160964966,
      "eval_runtime": 85.1688,
      "eval_samples_per_second": 58.707,
      "eval_steps_per_second": 0.928,
      "step": 30000
    },
    {
      "epoch": 41.44,
      "learning_rate": 4.595956873315364e-05,
      "loss": 0.596,
      "step": 30750
    },
    {
      "epoch": 41.44,
      "eval_loss": 1.5095667839050293,
      "eval_runtime": 85.1603,
      "eval_samples_per_second": 58.713,
      "eval_steps_per_second": 0.928,
      "step": 30750
    },
    {
      "epoch": 42.45,
      "learning_rate": 4.4948787061994616e-05,
      "loss": 0.5146,
      "step": 31500
    },
    {
      "epoch": 42.45,
      "eval_loss": 1.4808692932128906,
      "eval_runtime": 85.1817,
      "eval_samples_per_second": 58.698,
      "eval_steps_per_second": 0.927,
      "step": 31500
    },
    {
      "epoch": 43.46,
      "learning_rate": 4.3938005390835576e-05,
      "loss": 0.4453,
      "step": 32250
    },
    {
      "epoch": 43.46,
      "eval_loss": 1.5037322044372559,
      "eval_runtime": 85.2811,
      "eval_samples_per_second": 58.63,
      "eval_steps_per_second": 0.926,
      "step": 32250
    },
    {
      "epoch": 44.47,
      "learning_rate": 4.949528301886793e-05,
      "loss": 2.0138,
      "step": 33000
    },
    {
      "epoch": 44.47,
      "eval_loss": 1.798643708229065,
      "eval_runtime": 84.6945,
      "eval_samples_per_second": 59.036,
      "eval_steps_per_second": 0.933,
      "step": 33000
    },
    {
      "epoch": 45.48,
      "learning_rate": 4.898989218328841e-05,
      "loss": 0.8625,
      "step": 33750
    },
    {
      "epoch": 45.48,
      "eval_loss": 1.6345179080963135,
      "eval_runtime": 84.4707,
      "eval_samples_per_second": 59.192,
      "eval_steps_per_second": 0.935,
      "step": 33750
    },
    {
      "epoch": 46.49,
      "learning_rate": 4.848517520215634e-05,
      "loss": 0.6724,
      "step": 34500
    },
    {
      "epoch": 46.49,
      "eval_loss": 1.5662704706192017,
      "eval_runtime": 84.3899,
      "eval_samples_per_second": 59.249,
      "eval_steps_per_second": 0.936,
      "step": 34500
    },
    {
      "epoch": 47.51,
      "learning_rate": 4.7979784366576826e-05,
      "loss": 0.5584,
      "step": 35250
    },
    {
      "epoch": 47.51,
      "eval_loss": 1.5644365549087524,
      "eval_runtime": 84.4556,
      "eval_samples_per_second": 59.203,
      "eval_steps_per_second": 0.935,
      "step": 35250
    },
    {
      "epoch": 48.52,
      "learning_rate": 4.7474393530997306e-05,
      "loss": 0.4816,
      "step": 36000
    },
    {
      "epoch": 48.52,
      "eval_loss": 1.5659728050231934,
      "eval_runtime": 84.3909,
      "eval_samples_per_second": 59.248,
      "eval_steps_per_second": 0.936,
      "step": 36000
    },
    {
      "epoch": 49.53,
      "learning_rate": 4.6969676549865234e-05,
      "loss": 0.4128,
      "step": 36750
    },
    {
      "epoch": 49.53,
      "eval_loss": 1.4841716289520264,
      "eval_runtime": 84.3983,
      "eval_samples_per_second": 59.243,
      "eval_steps_per_second": 0.936,
      "step": 36750
    },
    {
      "epoch": 50.0,
      "step": 37100,
      "total_flos": 1.60568482342699e+18,
      "train_loss": 0.02558288409703504,
      "train_runtime": 2894.6287,
      "train_samples_per_second": 1640.97,
      "train_steps_per_second": 12.817
    },
    {
      "epoch": 50.0,
      "step": 37100,
      "total_flos": 1.60568482342699e+18,
      "train_loss": 0.0,
      "train_runtime": 1.9442,
      "train_samples_per_second": 2443166.931,
      "train_steps_per_second": 19082.42
    },
    {
      "epoch": 50.0,
      "step": 37100,
      "total_flos": 1.60568482342699e+18,
      "train_loss": 0.0,
      "train_runtime": 1.9174,
      "train_samples_per_second": 2477257.733,
      "train_steps_per_second": 19348.687
    }
  ],
  "logging_steps": 750,
  "max_steps": 37100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 750,
  "total_flos": 1.60568482342699e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}