|
{
  "best_metric": 0.12837110459804535,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.1182033096926714,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001182033096926714,
      "grad_norm": 0.45257845520973206,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 0.7527,
      "step": 1
    },
    {
      "epoch": 0.001182033096926714,
      "eval_loss": 0.21998687088489532,
      "eval_runtime": 6.5713,
      "eval_samples_per_second": 7.609,
      "eval_steps_per_second": 1.065,
      "step": 1
    },
    {
      "epoch": 0.002364066193853428,
      "grad_norm": 0.4749073386192322,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 0.6891,
      "step": 2
    },
    {
      "epoch": 0.0035460992907801418,
      "grad_norm": 0.4979456961154938,
      "learning_rate": 8.999999999999999e-05,
      "loss": 0.8038,
      "step": 3
    },
    {
      "epoch": 0.004728132387706856,
      "grad_norm": 0.4834277331829071,
      "learning_rate": 0.00011999999999999999,
      "loss": 0.69,
      "step": 4
    },
    {
      "epoch": 0.00591016548463357,
      "grad_norm": 0.4842442274093628,
      "learning_rate": 0.00015,
      "loss": 0.8362,
      "step": 5
    },
    {
      "epoch": 0.0070921985815602835,
      "grad_norm": 0.37017032504081726,
      "learning_rate": 0.00017999999999999998,
      "loss": 0.7844,
      "step": 6
    },
    {
      "epoch": 0.008274231678486997,
      "grad_norm": 0.20203356444835663,
      "learning_rate": 0.00020999999999999998,
      "loss": 0.5235,
      "step": 7
    },
    {
      "epoch": 0.009456264775413711,
      "grad_norm": 0.35705870389938354,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.5528,
      "step": 8
    },
    {
      "epoch": 0.010638297872340425,
      "grad_norm": 0.3792789876461029,
      "learning_rate": 0.00027,
      "loss": 0.5373,
      "step": 9
    },
    {
      "epoch": 0.01182033096926714,
      "grad_norm": 0.3239419460296631,
      "learning_rate": 0.0003,
      "loss": 0.4818,
      "step": 10
    },
    {
      "epoch": 0.013002364066193853,
      "grad_norm": 0.29636237025260925,
      "learning_rate": 0.0002999794957488703,
      "loss": 0.5745,
      "step": 11
    },
    {
      "epoch": 0.014184397163120567,
      "grad_norm": 0.3467446565628052,
      "learning_rate": 0.0002999179886011389,
      "loss": 0.6402,
      "step": 12
    },
    {
      "epoch": 0.015366430260047281,
      "grad_norm": 0.23589834570884705,
      "learning_rate": 0.0002998154953722457,
      "loss": 0.6432,
      "step": 13
    },
    {
      "epoch": 0.016548463356973995,
      "grad_norm": 0.195448100566864,
      "learning_rate": 0.00029967204408281613,
      "loss": 0.5233,
      "step": 14
    },
    {
      "epoch": 0.01773049645390071,
      "grad_norm": 0.2630046010017395,
      "learning_rate": 0.00029948767395100045,
      "loss": 0.5089,
      "step": 15
    },
    {
      "epoch": 0.018912529550827423,
      "grad_norm": 0.18320521712303162,
      "learning_rate": 0.0002992624353817517,
      "loss": 0.3504,
      "step": 16
    },
    {
      "epoch": 0.02009456264775414,
      "grad_norm": 0.16204728186130524,
      "learning_rate": 0.0002989963899530457,
      "loss": 0.3852,
      "step": 17
    },
    {
      "epoch": 0.02127659574468085,
      "grad_norm": 0.24153684079647064,
      "learning_rate": 0.00029868961039904624,
      "loss": 0.8729,
      "step": 18
    },
    {
      "epoch": 0.022458628841607566,
      "grad_norm": 0.16790688037872314,
      "learning_rate": 0.00029834218059022024,
      "loss": 0.3754,
      "step": 19
    },
    {
      "epoch": 0.02364066193853428,
      "grad_norm": 0.24352045357227325,
      "learning_rate": 0.00029795419551040833,
      "loss": 0.3613,
      "step": 20
    },
    {
      "epoch": 0.024822695035460994,
      "grad_norm": 0.17234143614768982,
      "learning_rate": 0.00029752576123085736,
      "loss": 0.4107,
      "step": 21
    },
    {
      "epoch": 0.026004728132387706,
      "grad_norm": 0.16111795604228973,
      "learning_rate": 0.0002970569948812214,
      "loss": 0.2606,
      "step": 22
    },
    {
      "epoch": 0.027186761229314422,
      "grad_norm": 0.12541893124580383,
      "learning_rate": 0.0002965480246175399,
      "loss": 0.2757,
      "step": 23
    },
    {
      "epoch": 0.028368794326241134,
      "grad_norm": 0.14481070637702942,
      "learning_rate": 0.0002959989895872009,
      "loss": 0.2086,
      "step": 24
    },
    {
      "epoch": 0.02955082742316785,
      "grad_norm": 0.2305174022912979,
      "learning_rate": 0.0002954100398908995,
      "loss": 0.4623,
      "step": 25
    },
    {
      "epoch": 0.02955082742316785,
      "eval_loss": 0.13037407398223877,
      "eval_runtime": 6.8052,
      "eval_samples_per_second": 7.347,
      "eval_steps_per_second": 1.029,
      "step": 25
    },
    {
      "epoch": 0.030732860520094562,
      "grad_norm": 0.19182905554771423,
      "learning_rate": 0.0002947813365416023,
      "loss": 0.4473,
      "step": 26
    },
    {
      "epoch": 0.031914893617021274,
      "grad_norm": 0.24724812805652618,
      "learning_rate": 0.0002941130514205272,
      "loss": 0.4684,
      "step": 27
    },
    {
      "epoch": 0.03309692671394799,
      "grad_norm": 0.15102490782737732,
      "learning_rate": 0.0002934053672301536,
      "loss": 0.2546,
      "step": 28
    },
    {
      "epoch": 0.034278959810874705,
      "grad_norm": 0.35056641697883606,
      "learning_rate": 0.00029265847744427303,
      "loss": 0.3202,
      "step": 29
    },
    {
      "epoch": 0.03546099290780142,
      "grad_norm": 0.2419431358575821,
      "learning_rate": 0.00029187258625509513,
      "loss": 0.3701,
      "step": 30
    },
    {
      "epoch": 0.03664302600472813,
      "grad_norm": 0.15826299786567688,
      "learning_rate": 0.00029104790851742417,
      "loss": 0.2572,
      "step": 31
    },
    {
      "epoch": 0.037825059101654845,
      "grad_norm": 0.2803819179534912,
      "learning_rate": 0.0002901846696899191,
      "loss": 0.4723,
      "step": 32
    },
    {
      "epoch": 0.03900709219858156,
      "grad_norm": 0.22237618267536163,
      "learning_rate": 0.00028928310577345606,
      "loss": 0.3599,
      "step": 33
    },
    {
      "epoch": 0.04018912529550828,
      "grad_norm": 0.2598985731601715,
      "learning_rate": 0.0002883434632466077,
      "loss": 0.501,
      "step": 34
    },
    {
      "epoch": 0.041371158392434985,
      "grad_norm": 0.4324254095554352,
      "learning_rate": 0.00028736599899825856,
      "loss": 0.4615,
      "step": 35
    },
    {
      "epoch": 0.0425531914893617,
      "grad_norm": 0.16531601548194885,
      "learning_rate": 0.00028635098025737434,
      "loss": 0.1464,
      "step": 36
    },
    {
      "epoch": 0.04373522458628842,
      "grad_norm": 0.16330963373184204,
      "learning_rate": 0.00028529868451994384,
      "loss": 0.2302,
      "step": 37
    },
    {
      "epoch": 0.04491725768321513,
      "grad_norm": 0.16654549539089203,
      "learning_rate": 0.0002842093994731145,
      "loss": 0.2296,
      "step": 38
    },
    {
      "epoch": 0.04609929078014184,
      "grad_norm": 0.5145742893218994,
      "learning_rate": 0.00028308342291654174,
      "loss": 0.2124,
      "step": 39
    },
    {
      "epoch": 0.04728132387706856,
      "grad_norm": 0.19344383478164673,
      "learning_rate": 0.00028192106268097334,
      "loss": 0.2257,
      "step": 40
    },
    {
      "epoch": 0.04846335697399527,
      "grad_norm": 0.20284941792488098,
      "learning_rate": 0.00028072263654409154,
      "loss": 0.3324,
      "step": 41
    },
    {
      "epoch": 0.04964539007092199,
      "grad_norm": 0.09100235253572464,
      "learning_rate": 0.0002794884721436361,
      "loss": 0.0292,
      "step": 42
    },
    {
      "epoch": 0.0508274231678487,
      "grad_norm": 0.3754136562347412,
      "learning_rate": 0.00027821890688783083,
      "loss": 0.2822,
      "step": 43
    },
    {
      "epoch": 0.05200945626477541,
      "grad_norm": 0.03608919680118561,
      "learning_rate": 0.0002769142878631403,
      "loss": 0.0019,
      "step": 44
    },
    {
      "epoch": 0.05319148936170213,
      "grad_norm": 0.2937483787536621,
      "learning_rate": 0.00027557497173937923,
      "loss": 0.2582,
      "step": 45
    },
    {
      "epoch": 0.054373522458628844,
      "grad_norm": 0.1442236453294754,
      "learning_rate": 0.000274201324672203,
      "loss": 0.0591,
      "step": 46
    },
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 0.19208501279354095,
      "learning_rate": 0.00027279372220300385,
      "loss": 0.0851,
      "step": 47
    },
    {
      "epoch": 0.05673758865248227,
      "grad_norm": 0.02040451020002365,
      "learning_rate": 0.0002713525491562421,
      "loss": 0.0009,
      "step": 48
    },
    {
      "epoch": 0.057919621749408984,
      "grad_norm": 0.36763325333595276,
      "learning_rate": 0.00026987819953423867,
      "loss": 0.1239,
      "step": 49
    },
    {
      "epoch": 0.0591016548463357,
      "grad_norm": 0.5783975124359131,
      "learning_rate": 0.00026837107640945905,
      "loss": 0.0694,
      "step": 50
    },
    {
      "epoch": 0.0591016548463357,
      "eval_loss": 0.12837110459804535,
      "eval_runtime": 6.8069,
      "eval_samples_per_second": 7.346,
      "eval_steps_per_second": 1.028,
      "step": 50
    },
    {
      "epoch": 0.06028368794326241,
      "grad_norm": 0.23129263520240784,
      "learning_rate": 0.0002668315918143169,
      "loss": 0.4675,
      "step": 51
    },
    {
      "epoch": 0.061465721040189124,
      "grad_norm": 0.3005974590778351,
      "learning_rate": 0.00026526016662852886,
      "loss": 0.5101,
      "step": 52
    },
    {
      "epoch": 0.06264775413711583,
      "grad_norm": 0.15042176842689514,
      "learning_rate": 0.00026365723046405023,
      "loss": 0.3429,
      "step": 53
    },
    {
      "epoch": 0.06382978723404255,
      "grad_norm": 0.1577768176794052,
      "learning_rate": 0.0002620232215476231,
      "loss": 0.5794,
      "step": 54
    },
    {
      "epoch": 0.06501182033096926,
      "grad_norm": 0.19460776448249817,
      "learning_rate": 0.0002603585866009697,
      "loss": 0.6019,
      "step": 55
    },
    {
      "epoch": 0.06619385342789598,
      "grad_norm": 0.18861106038093567,
      "learning_rate": 0.00025866378071866334,
      "loss": 0.7549,
      "step": 56
    },
    {
      "epoch": 0.0673758865248227,
      "grad_norm": 0.14117053151130676,
      "learning_rate": 0.00025693926724370956,
      "loss": 0.459,
      "step": 57
    },
    {
      "epoch": 0.06855791962174941,
      "grad_norm": 0.22173011302947998,
      "learning_rate": 0.00025518551764087326,
      "loss": 0.5504,
      "step": 58
    },
    {
      "epoch": 0.06973995271867613,
      "grad_norm": 0.25081151723861694,
      "learning_rate": 0.00025340301136778483,
      "loss": 0.524,
      "step": 59
    },
    {
      "epoch": 0.07092198581560284,
      "grad_norm": 0.13126404583454132,
      "learning_rate": 0.00025159223574386114,
      "loss": 0.3677,
      "step": 60
    },
    {
      "epoch": 0.07210401891252956,
      "grad_norm": 0.13712657988071442,
      "learning_rate": 0.0002497536858170772,
      "loss": 0.4722,
      "step": 61
    },
    {
      "epoch": 0.07328605200945626,
      "grad_norm": 0.2549358904361725,
      "learning_rate": 0.00024788786422862526,
      "loss": 0.8239,
      "step": 62
    },
    {
      "epoch": 0.07446808510638298,
      "grad_norm": 0.14273299276828766,
      "learning_rate": 0.00024599528107549745,
      "loss": 0.3742,
      "step": 63
    },
    {
      "epoch": 0.07565011820330969,
      "grad_norm": 0.19160489737987518,
      "learning_rate": 0.00024407645377103054,
      "loss": 0.6387,
      "step": 64
    },
    {
      "epoch": 0.0768321513002364,
      "grad_norm": 0.15993249416351318,
      "learning_rate": 0.00024213190690345018,
      "loss": 0.5228,
      "step": 65
    },
    {
      "epoch": 0.07801418439716312,
      "grad_norm": 0.15967561304569244,
      "learning_rate": 0.00024016217209245374,
      "loss": 0.4422,
      "step": 66
    },
    {
      "epoch": 0.07919621749408984,
      "grad_norm": 0.1872466653585434,
      "learning_rate": 0.00023816778784387094,
      "loss": 0.4686,
      "step": 67
    },
    {
      "epoch": 0.08037825059101655,
      "grad_norm": 0.1232978031039238,
      "learning_rate": 0.0002361492994024415,
      "loss": 0.3756,
      "step": 68
    },
    {
      "epoch": 0.08156028368794327,
      "grad_norm": 0.15848113596439362,
      "learning_rate": 0.0002341072586027509,
      "loss": 0.4773,
      "step": 69
    },
    {
      "epoch": 0.08274231678486997,
      "grad_norm": 0.1522669494152069,
      "learning_rate": 0.00023204222371836405,
      "loss": 0.433,
      "step": 70
    },
    {
      "epoch": 0.08392434988179669,
      "grad_norm": 0.37246736884117126,
      "learning_rate": 0.00022995475930919905,
      "loss": 0.3253,
      "step": 71
    },
    {
      "epoch": 0.0851063829787234,
      "grad_norm": 0.18676169216632843,
      "learning_rate": 0.00022784543606718227,
      "loss": 0.3645,
      "step": 72
    },
    {
      "epoch": 0.08628841607565012,
      "grad_norm": 0.1256495714187622,
      "learning_rate": 0.00022571483066022657,
      "loss": 0.25,
      "step": 73
    },
    {
      "epoch": 0.08747044917257683,
      "grad_norm": 0.128931924700737,
      "learning_rate": 0.0002235635255745762,
      "loss": 0.2731,
      "step": 74
    },
    {
      "epoch": 0.08865248226950355,
      "grad_norm": 0.13608215749263763,
      "learning_rate": 0.00022139210895556104,
      "loss": 0.3361,
      "step": 75
    },
    {
      "epoch": 0.08865248226950355,
      "eval_loss": 0.11915016919374466,
      "eval_runtime": 6.6761,
      "eval_samples_per_second": 7.489,
      "eval_steps_per_second": 1.049,
      "step": 75
    },
    {
      "epoch": 0.08983451536643026,
      "grad_norm": 0.1711040437221527,
      "learning_rate": 0.00021920117444680317,
      "loss": 0.4544,
      "step": 76
    },
    {
      "epoch": 0.09101654846335698,
      "grad_norm": 0.24072404205799103,
      "learning_rate": 0.00021699132102792097,
      "loss": 0.2764,
      "step": 77
    },
    {
      "epoch": 0.09219858156028368,
      "grad_norm": 0.15333881974220276,
      "learning_rate": 0.0002147631528507739,
      "loss": 0.3369,
      "step": 78
    },
    {
      "epoch": 0.0933806146572104,
      "grad_norm": 0.14597493410110474,
      "learning_rate": 0.00021251727907429355,
      "loss": 0.2058,
      "step": 79
    },
    {
      "epoch": 0.09456264775413711,
      "grad_norm": 0.15312355756759644,
      "learning_rate": 0.0002102543136979454,
      "loss": 0.278,
      "step": 80
    },
    {
      "epoch": 0.09574468085106383,
      "grad_norm": 0.1510908156633377,
      "learning_rate": 0.0002079748753938678,
      "loss": 0.2616,
      "step": 81
    },
    {
      "epoch": 0.09692671394799054,
      "grad_norm": 0.1632828563451767,
      "learning_rate": 0.0002056795873377331,
      "loss": 0.2734,
      "step": 82
    },
    {
      "epoch": 0.09810874704491726,
      "grad_norm": 0.21976338326931,
      "learning_rate": 0.00020336907703837748,
      "loss": 0.3517,
      "step": 83
    },
    {
      "epoch": 0.09929078014184398,
      "grad_norm": 0.18902301788330078,
      "learning_rate": 0.00020104397616624645,
      "loss": 0.3057,
      "step": 84
    },
    {
      "epoch": 0.10047281323877069,
      "grad_norm": 0.17857129871845245,
      "learning_rate": 0.00019870492038070252,
      "loss": 0.2618,
      "step": 85
    },
    {
      "epoch": 0.1016548463356974,
      "grad_norm": 0.14383412897586823,
      "learning_rate": 0.0001963525491562421,
      "loss": 0.1557,
      "step": 86
    },
    {
      "epoch": 0.10283687943262411,
      "grad_norm": 0.18979600071907043,
      "learning_rate": 0.0001939875056076697,
      "loss": 0.2969,
      "step": 87
    },
    {
      "epoch": 0.10401891252955082,
      "grad_norm": 0.23204195499420166,
      "learning_rate": 0.00019161043631427666,
      "loss": 0.3901,
      "step": 88
    },
    {
      "epoch": 0.10520094562647754,
      "grad_norm": 0.23629039525985718,
      "learning_rate": 0.00018922199114307294,
      "loss": 0.3413,
      "step": 89
    },
    {
      "epoch": 0.10638297872340426,
      "grad_norm": 0.12269175052642822,
      "learning_rate": 0.00018682282307111987,
      "loss": 0.0636,
      "step": 90
    },
    {
      "epoch": 0.10756501182033097,
      "grad_norm": 1.75273597240448,
      "learning_rate": 0.00018441358800701273,
      "loss": 0.2072,
      "step": 91
    },
    {
      "epoch": 0.10874704491725769,
      "grad_norm": 0.07240819931030273,
      "learning_rate": 0.00018199494461156203,
      "loss": 0.0383,
      "step": 92
    },
    {
      "epoch": 0.1099290780141844,
      "grad_norm": 0.1977987140417099,
      "learning_rate": 0.000179567554117722,
      "loss": 0.1972,
      "step": 93
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 0.15330693125724792,
      "learning_rate": 0.00017713208014981648,
      "loss": 0.1202,
      "step": 94
    },
    {
      "epoch": 0.11229314420803782,
      "grad_norm": 0.2938881516456604,
      "learning_rate": 0.00017468918854211007,
      "loss": 0.2919,
      "step": 95
    },
    {
      "epoch": 0.11347517730496454,
      "grad_norm": 0.19431200623512268,
      "learning_rate": 0.00017223954715677627,
      "loss": 0.1018,
      "step": 96
    },
    {
      "epoch": 0.11465721040189125,
      "grad_norm": 0.27025851607322693,
      "learning_rate": 0.00016978382570131034,
      "loss": 0.243,
      "step": 97
    },
    {
      "epoch": 0.11583924349881797,
      "grad_norm": 0.12175106257200241,
      "learning_rate": 0.00016732269554543794,
      "loss": 0.08,
      "step": 98
    },
    {
      "epoch": 0.11702127659574468,
      "grad_norm": 0.18022122979164124,
      "learning_rate": 0.00016485682953756942,
      "loss": 0.0626,
      "step": 99
    },
    {
      "epoch": 0.1182033096926714,
      "grad_norm": 0.3214963674545288,
      "learning_rate": 0.00016238690182084986,
      "loss": 0.1805,
      "step": 100
    },
    {
      "epoch": 0.1182033096926714,
      "eval_loss": 0.1298762410879135,
      "eval_runtime": 6.7923,
      "eval_samples_per_second": 7.361,
      "eval_steps_per_second": 1.031,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.923209170476073e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|