{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9915764139590855,
  "eval_steps": 25,
  "global_step": 103,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.009626955475330927,
      "grad_norm": 36.655242919921875,
      "learning_rate": 9.997674418116759e-06,
      "loss": 10.061,
      "step": 1
    },
    {
      "epoch": 0.019253910950661854,
      "grad_norm": 29.486038208007812,
      "learning_rate": 9.99069983579947e-06,
      "loss": 9.9813,
      "step": 2
    },
    {
      "epoch": 0.02888086642599278,
      "grad_norm": 16.687410354614258,
      "learning_rate": 9.979082741033047e-06,
      "loss": 9.5428,
      "step": 3
    },
    {
      "epoch": 0.03850782190132371,
      "grad_norm": 19.905546188354492,
      "learning_rate": 9.96283394041954e-06,
      "loss": 9.7047,
      "step": 4
    },
    {
      "epoch": 0.048134777376654635,
      "grad_norm": 21.7939453125,
      "learning_rate": 9.941968549125481e-06,
      "loss": 9.7184,
      "step": 5
    },
    {
      "epoch": 0.05776173285198556,
      "grad_norm": Infinity,
      "learning_rate": 9.941968549125481e-06,
      "loss": 9.7494,
      "step": 6
    },
    {
      "epoch": 0.06738868832731648,
      "grad_norm": 37.23790740966797,
      "learning_rate": 9.916505976821262e-06,
      "loss": 9.8672,
      "step": 7
    },
    {
      "epoch": 0.07701564380264742,
      "grad_norm": 57.11663055419922,
      "learning_rate": 9.886469909625624e-06,
      "loss": 9.9945,
      "step": 8
    },
    {
      "epoch": 0.08664259927797834,
      "grad_norm": 32.410648345947266,
      "learning_rate": 9.851888288072053e-06,
      "loss": 9.832,
      "step": 9
    },
    {
      "epoch": 0.09626955475330927,
      "grad_norm": 20.996421813964844,
      "learning_rate": 9.81279328111758e-06,
      "loss": 9.6547,
      "step": 10
    },
    {
      "epoch": 0.10589651022864019,
      "grad_norm": 29.850004196166992,
      "learning_rate": 9.769221256218165e-06,
      "loss": 9.634,
      "step": 11
    },
    {
      "epoch": 0.11552346570397112,
      "grad_norm": 20.321626663208008,
      "learning_rate": 9.721212745498493e-06,
      "loss": 9.5294,
      "step": 12
    },
    {
      "epoch": 0.12515042117930206,
      "grad_norm": 21.042863845825195,
      "learning_rate": 9.66881240804768e-06,
      "loss": 9.4986,
      "step": 13
    },
    {
      "epoch": 0.13477737665463296,
      "grad_norm": 39.393123626708984,
      "learning_rate": 9.612068988375898e-06,
      "loss": 9.7214,
      "step": 14
    },
    {
      "epoch": 0.1444043321299639,
      "grad_norm": 29.8813533782959,
      "learning_rate": 9.551035271070665e-06,
      "loss": 9.6157,
      "step": 15
    },
    {
      "epoch": 0.15403128760529483,
      "grad_norm": 22.601821899414062,
      "learning_rate": 9.485768031694872e-06,
      "loss": 9.5022,
      "step": 16
    },
    {
      "epoch": 0.16365824308062576,
      "grad_norm": 38.687896728515625,
      "learning_rate": 9.416327983972304e-06,
      "loss": 9.5861,
      "step": 17
    },
    {
      "epoch": 0.17328519855595667,
      "grad_norm": 40.1544303894043,
      "learning_rate": 9.342779723309746e-06,
      "loss": 9.6032,
      "step": 18
    },
    {
      "epoch": 0.1829121540312876,
      "grad_norm": 31.16316032409668,
      "learning_rate": 9.26519166670821e-06,
      "loss": 9.4739,
      "step": 19
    },
    {
      "epoch": 0.19253910950661854,
      "grad_norm": 25.174264907836914,
      "learning_rate": 9.183635989119211e-06,
      "loss": 9.3975,
      "step": 20
    },
    {
      "epoch": 0.20216606498194944,
      "grad_norm": 21.90279197692871,
      "learning_rate": 9.098188556305262e-06,
      "loss": 9.3676,
      "step": 21
    },
    {
      "epoch": 0.21179302045728038,
      "grad_norm": 27.172040939331055,
      "learning_rate": 9.008928854267054e-06,
      "loss": 9.3662,
      "step": 22
    },
    {
      "epoch": 0.2214199759326113,
      "grad_norm": 35.05919647216797,
      "learning_rate": 8.91593991530297e-06,
      "loss": 9.3918,
      "step": 23
    },
    {
      "epoch": 0.23104693140794225,
      "grad_norm": 59.50571823120117,
      "learning_rate": 8.819308240769726e-06,
      "loss": 9.4201,
      "step": 24
    },
    {
      "epoch": 0.24067388688327315,
      "grad_norm": 31.860584259033203,
      "learning_rate": 8.71912372061598e-06,
      "loss": 9.345,
      "step": 25
    },
    {
      "epoch": 0.24067388688327315,
      "eval_clap": 0.21781522035598755,
      "eval_loss": 3.680818557739258,
      "eval_runtime": 197.7141,
      "eval_samples_per_second": 0.162,
      "eval_steps_per_second": 0.162,
      "step": 25
    },
    {
      "epoch": 0.2503008423586041,
      "grad_norm": 35.62887191772461,
      "learning_rate": 8.615479549763756e-06,
      "loss": 9.2727,
      "step": 26
    },
    {
      "epoch": 0.259927797833935,
      "grad_norm": 29.90762710571289,
      "learning_rate": 8.508472141415468e-06,
      "loss": 9.2321,
      "step": 27
    },
    {
      "epoch": 0.2695547533092659,
      "grad_norm": 25.5911808013916,
      "learning_rate": 8.398201037367202e-06,
      "loss": 9.1778,
      "step": 28
    },
    {
      "epoch": 0.27918170878459686,
      "grad_norm": 22.98084259033203,
      "learning_rate": 8.284768815411693e-06,
      "loss": 9.1713,
      "step": 29
    },
    {
      "epoch": 0.2888086642599278,
      "grad_norm": 23.99754524230957,
      "learning_rate": 8.168280993917078e-06,
      "loss": 9.1769,
      "step": 30
    },
    {
      "epoch": 0.29843561973525873,
      "grad_norm": 26.287574768066406,
      "learning_rate": 8.048845933670274e-06,
      "loss": 9.0526,
      "step": 31
    },
    {
      "epoch": 0.30806257521058966,
      "grad_norm": 47.93935775756836,
      "learning_rate": 7.92657473707621e-06,
      "loss": 9.1205,
      "step": 32
    },
    {
      "epoch": 0.3176895306859206,
      "grad_norm": 30.085956573486328,
      "learning_rate": 7.801581144806752e-06,
      "loss": 8.9902,
      "step": 33
    },
    {
      "epoch": 0.32731648616125153,
      "grad_norm": 55.95182418823242,
      "learning_rate": 7.673981429995372e-06,
      "loss": 9.0223,
      "step": 34
    },
    {
      "epoch": 0.3369434416365824,
      "grad_norm": 43.94280242919922,
      "learning_rate": 7.5438942900761035e-06,
      "loss": 9.0648,
      "step": 35
    },
    {
      "epoch": 0.34657039711191334,
      "grad_norm": 44.075782775878906,
      "learning_rate": 7.411440736367281e-06,
      "loss": 9.0315,
      "step": 36
    },
    {
      "epoch": 0.3561973525872443,
      "grad_norm": 36.636837005615234,
      "learning_rate": 7.276743981502856e-06,
      "loss": 8.9343,
      "step": 37
    },
    {
      "epoch": 0.3658243080625752,
      "grad_norm": 57.34428787231445,
      "learning_rate": 7.139929324815965e-06,
      "loss": 8.9535,
      "step": 38
    },
    {
      "epoch": 0.37545126353790614,
      "grad_norm": 33.31437301635742,
      "learning_rate": 7.00112403578139e-06,
      "loss": 8.831,
      "step": 39
    },
    {
      "epoch": 0.3850782190132371,
      "grad_norm": 41.02770233154297,
      "learning_rate": 6.860457235625322e-06,
      "loss": 8.7563,
      "step": 40
    },
    {
      "epoch": 0.394705174488568,
      "grad_norm": Infinity,
      "learning_rate": 6.860457235625322e-06,
      "loss": 8.7286,
      "step": 41
    },
    {
      "epoch": 0.4043321299638989,
      "grad_norm": 36.15497970581055,
      "learning_rate": 6.7180597772125665e-06,
      "loss": 8.8619,
      "step": 42
    },
    {
      "epoch": 0.4139590854392298,
      "grad_norm": 27.45692253112793,
      "learning_rate": 6.574064123322925e-06,
      "loss": 8.7769,
      "step": 43
    },
    {
      "epoch": 0.42358604091456076,
      "grad_norm": 44.91440200805664,
      "learning_rate": 6.42860422342998e-06,
      "loss": 8.8987,
      "step": 44
    },
    {
      "epoch": 0.4332129963898917,
      "grad_norm": 49.40604782104492,
      "learning_rate": 6.281815389096903e-06,
      "loss": 8.5464,
      "step": 45
    },
    {
      "epoch": 0.4428399518652226,
      "grad_norm": 38.211082458496094,
      "learning_rate": 6.133834168105206e-06,
      "loss": 8.6558,
      "step": 46
    },
    {
      "epoch": 0.45246690734055356,
      "grad_norm": 40.19668960571289,
      "learning_rate": 5.9847982174335314e-06,
      "loss": 8.6081,
      "step": 47
    },
    {
      "epoch": 0.4620938628158845,
      "grad_norm": 31.05793571472168,
      "learning_rate": 5.834846175204612e-06,
      "loss": 8.6122,
      "step": 48
    },
    {
      "epoch": 0.4717208182912154,
      "grad_norm": 54.35275650024414,
      "learning_rate": 5.684117531719552e-06,
      "loss": 8.5534,
      "step": 49
    },
    {
      "epoch": 0.4813477737665463,
      "grad_norm": 31.807424545288086,
      "learning_rate": 5.532752499699381e-06,
      "loss": 8.5598,
      "step": 50
    },
    {
      "epoch": 0.4813477737665463,
      "eval_clap": 0.2060224413871765,
      "eval_loss": 3.6995468139648438,
      "eval_runtime": 203.4451,
      "eval_samples_per_second": 0.157,
      "eval_steps_per_second": 0.157,
      "step": 50
    },
    {
      "epoch": 0.49097472924187724,
      "grad_norm": 32.47773742675781,
      "learning_rate": 5.380891883854591e-06,
      "loss": 8.5232,
      "step": 51
    },
    {
      "epoch": 0.5006016847172082,
      "grad_norm": 32.18419647216797,
      "learning_rate": 5.228676949903974e-06,
      "loss": 8.5755,
      "step": 52
    },
    {
      "epoch": 0.5102286401925391,
      "grad_norm": 40.90557098388672,
      "learning_rate": 5.07624929316463e-06,
      "loss": 8.4902,
      "step": 53
    },
    {
      "epoch": 0.51985559566787,
      "grad_norm": 33.113006591796875,
      "learning_rate": 4.923750706835371e-06,
      "loss": 8.5366,
      "step": 54
    },
    {
      "epoch": 0.529482551143201,
      "grad_norm": 38.09598159790039,
      "learning_rate": 4.771323050096028e-06,
      "loss": 8.5472,
      "step": 55
    },
    {
      "epoch": 0.5391095066185319,
      "grad_norm": 27.295963287353516,
      "learning_rate": 4.619108116145411e-06,
      "loss": 8.5864,
      "step": 56
    },
    {
      "epoch": 0.5487364620938628,
      "grad_norm": 43.426124572753906,
      "learning_rate": 4.467247500300621e-06,
      "loss": 8.3008,
      "step": 57
    },
    {
      "epoch": 0.5583634175691937,
      "grad_norm": 26.15068817138672,
      "learning_rate": 4.31588246828045e-06,
      "loss": 8.5288,
      "step": 58
    },
    {
      "epoch": 0.5679903730445247,
      "grad_norm": 28.06884765625,
      "learning_rate": 4.165153824795391e-06,
      "loss": 8.4413,
      "step": 59
    },
    {
      "epoch": 0.5776173285198556,
      "grad_norm": 37.16442108154297,
      "learning_rate": 4.015201782566471e-06,
      "loss": 8.2101,
      "step": 60
    },
    {
      "epoch": 0.5872442839951865,
      "grad_norm": 45.41092300415039,
      "learning_rate": 3.866165831894796e-06,
      "loss": 8.1815,
      "step": 61
    },
    {
      "epoch": 0.5968712394705175,
      "grad_norm": 35.478515625,
      "learning_rate": 3.7181846109031007e-06,
      "loss": 8.379,
      "step": 62
    },
    {
      "epoch": 0.6064981949458483,
      "grad_norm": 34.70208740234375,
      "learning_rate": 3.5713957765700224e-06,
      "loss": 8.2477,
      "step": 63
    },
    {
      "epoch": 0.6161251504211793,
      "grad_norm": 32.52558135986328,
      "learning_rate": 3.425935876677077e-06,
      "loss": 8.2687,
      "step": 64
    },
    {
      "epoch": 0.6257521058965102,
      "grad_norm": 59.53068923950195,
      "learning_rate": 3.2819402227874364e-06,
      "loss": 7.8701,
      "step": 65
    },
    {
      "epoch": 0.6353790613718412,
      "grad_norm": 25.783588409423828,
      "learning_rate": 3.1395427643746802e-06,
      "loss": 8.3832,
      "step": 66
    },
    {
      "epoch": 0.6450060168471721,
      "grad_norm": 30.451751708984375,
      "learning_rate": 2.99887596421861e-06,
      "loss": 8.2528,
      "step": 67
    },
    {
      "epoch": 0.6546329723225031,
      "grad_norm": 36.75849914550781,
      "learning_rate": 2.860070675184036e-06,
      "loss": 8.1908,
      "step": 68
    },
    {
      "epoch": 0.6642599277978339,
      "grad_norm": 36.29214859008789,
      "learning_rate": 2.7232560184971437e-06,
      "loss": 8.0419,
      "step": 69
    },
    {
      "epoch": 0.6738868832731648,
      "grad_norm": 40.11077880859375,
      "learning_rate": 2.588559263632719e-06,
      "loss": 8.001,
      "step": 70
    },
    {
      "epoch": 0.6835138387484958,
      "grad_norm": 43.78921890258789,
      "learning_rate": 2.4561057099238973e-06,
      "loss": 8.1344,
      "step": 71
    },
    {
      "epoch": 0.6931407942238267,
      "grad_norm": 18.768470764160156,
      "learning_rate": 2.3260185700046295e-06,
      "loss": 8.4484,
      "step": 72
    },
    {
      "epoch": 0.7027677496991577,
      "grad_norm": 36.79136657714844,
      "learning_rate": 2.1984188551932513e-06,
      "loss": 8.078,
      "step": 73
    },
    {
      "epoch": 0.7123947051744886,
      "grad_norm": 23.20965003967285,
      "learning_rate": 2.0734252629237892e-06,
      "loss": 8.3404,
      "step": 74
    },
    {
      "epoch": 0.7220216606498195,
      "grad_norm": 23.93864631652832,
      "learning_rate": 1.9511540663297284e-06,
      "loss": 8.3691,
      "step": 75
    },
    {
      "epoch": 0.7220216606498195,
      "eval_clap": 0.21875908970832825,
      "eval_loss": 3.7276365756988525,
      "eval_runtime": 203.3924,
      "eval_samples_per_second": 0.157,
      "eval_steps_per_second": 0.157,
      "step": 75
    },
    {
      "epoch": 0.7316486161251504,
      "grad_norm": 41.18170166015625,
      "learning_rate": 1.8317190060829242e-06,
      "loss": 7.9022,
      "step": 76
    },
    {
      "epoch": 0.7412755716004813,
      "grad_norm": 25.832717895507812,
      "learning_rate": 1.7152311845883096e-06,
      "loss": 8.1734,
      "step": 77
    },
    {
      "epoch": 0.7509025270758123,
      "grad_norm": 26.72269630432129,
      "learning_rate": 1.601798962632799e-06,
      "loss": 8.1561,
      "step": 78
    },
    {
      "epoch": 0.7605294825511432,
      "grad_norm": 28.269739151000977,
      "learning_rate": 1.491527858584535e-06,
      "loss": 8.0723,
      "step": 79
    },
    {
      "epoch": 0.7701564380264742,
      "grad_norm": 37.14287185668945,
      "learning_rate": 1.3845204502362442e-06,
      "loss": 7.8806,
      "step": 80
    },
    {
      "epoch": 0.779783393501805,
      "grad_norm": 28.547544479370117,
      "learning_rate": 1.28087627938402e-06,
      "loss": 8.09,
      "step": 81
    },
    {
      "epoch": 0.789410348977136,
      "grad_norm": 24.613683700561523,
      "learning_rate": 1.1806917592302763e-06,
      "loss": 8.2687,
      "step": 82
    },
    {
      "epoch": 0.7990373044524669,
      "grad_norm": 36.22146987915039,
      "learning_rate": 1.0840600846970333e-06,
      "loss": 8.1399,
      "step": 83
    },
    {
      "epoch": 0.8086642599277978,
      "grad_norm": 38.009300231933594,
      "learning_rate": 9.91071145732948e-07,
      "loss": 7.8937,
      "step": 84
    },
    {
      "epoch": 0.8182912154031288,
      "grad_norm": 42.18280792236328,
      "learning_rate": 9.018114436947373e-07,
      "loss": 7.9889,
      "step": 85
    },
    {
      "epoch": 0.8279181708784596,
      "grad_norm": 28.27996826171875,
      "learning_rate": 8.163640108807897e-07,
      "loss": 8.094,
      "step": 86
    },
    {
      "epoch": 0.8375451263537906,
      "grad_norm": 53.69871139526367,
      "learning_rate": 7.348083332917927e-07,
      "loss": 7.7992,
      "step": 87
    },
    {
      "epoch": 0.8471720818291215,
      "grad_norm": 25.41583251953125,
      "learning_rate": 6.572202766902569e-07,
      "loss": 8.2104,
      "step": 88
    },
    {
      "epoch": 0.8567990373044525,
      "grad_norm": 54.796871185302734,
      "learning_rate": 5.836720160276971e-07,
      "loss": 7.9115,
      "step": 89
    },
    {
      "epoch": 0.8664259927797834,
      "grad_norm": 23.627187728881836,
      "learning_rate": 5.1423196830513e-07,
      "loss": 8.2171,
      "step": 90
    },
    {
      "epoch": 0.8760529482551144,
      "grad_norm": 34.87358856201172,
      "learning_rate": 4.4896472892933693e-07,
      "loss": 8.0587,
      "step": 91
    },
    {
      "epoch": 0.8856799037304453,
      "grad_norm": 42.69806671142578,
      "learning_rate": 3.8793101162410417e-07,
      "loss": 7.7615,
      "step": 92
    },
    {
      "epoch": 0.8953068592057761,
      "grad_norm": 22.716751098632812,
      "learning_rate": 3.3118759195232273e-07,
      "loss": 8.2045,
      "step": 93
    },
    {
      "epoch": 0.9049338146811071,
      "grad_norm": 47.218284606933594,
      "learning_rate": 2.787872545015069e-07,
      "loss": 7.684,
      "step": 94
    },
    {
      "epoch": 0.914560770156438,
      "grad_norm": 24.743898391723633,
      "learning_rate": 2.307787437818365e-07,
      "loss": 8.1474,
      "step": 95
    },
    {
      "epoch": 0.924187725631769,
      "grad_norm": 27.12813377380371,
      "learning_rate": 1.8720671888242058e-07,
      "loss": 8.2783,
      "step": 96
    },
    {
      "epoch": 0.9338146811070999,
      "grad_norm": 45.253604888916016,
      "learning_rate": 1.4811171192794628e-07,
      "loss": 7.945,
      "step": 97
    },
    {
      "epoch": 0.9434416365824309,
      "grad_norm": 30.242605209350586,
      "learning_rate": 1.1353009037437523e-07,
      "loss": 8.1772,
      "step": 98
    },
    {
      "epoch": 0.9530685920577617,
      "grad_norm": 37.83307647705078,
      "learning_rate": 8.34940231787379e-08,
      "loss": 7.8305,
      "step": 99
    },
    {
      "epoch": 0.9626955475330926,
      "grad_norm": 23.995698928833008,
      "learning_rate": 5.803145087451945e-08,
      "loss": 8.1954,
      "step": 100
    },
    {
      "epoch": 0.9626955475330926,
      "eval_clap": 0.2072528600692749,
      "eval_loss": 3.7368812561035156,
      "eval_runtime": 204.8586,
      "eval_samples_per_second": 0.156,
      "eval_steps_per_second": 0.156,
      "step": 100
    },
    {
      "epoch": 0.9723225030084236,
      "grad_norm": 44.95185470581055,
      "learning_rate": 3.716605958046071e-08,
      "loss": 7.7546,
      "step": 101
    },
    {
      "epoch": 0.9819494584837545,
      "grad_norm": 38.132503509521484,
      "learning_rate": 2.0917258966953735e-08,
      "loss": 7.8938,
      "step": 102
    },
    {
      "epoch": 0.9915764139590855,
      "grad_norm": 33.25323486328125,
      "learning_rate": 9.300164200530815e-09,
      "loss": 7.895,
      "step": 103
    },
    {
      "epoch": 0.9915764139590855,
      "step": 103,
      "total_flos": 171610142427840.0,
      "train_loss": 8.696112142025846,
      "train_runtime": 1142.9145,
      "train_samples_per_second": 0.727,
      "train_steps_per_second": 0.09
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 103,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 171610142427840.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}