{
  "best_metric": 1.517699122428894,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.03702104029123218,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004936138705497624,
      "grad_norm": 8.610563278198242,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 3.296,
      "step": 1
    },
    {
      "epoch": 0.0004936138705497624,
      "eval_loss": 3.618351697921753,
      "eval_runtime": 2.0991,
      "eval_samples_per_second": 23.82,
      "eval_steps_per_second": 6.193,
      "step": 1
    },
    {
      "epoch": 0.0009872277410995249,
      "grad_norm": 9.678094863891602,
      "learning_rate": 6.666666666666667e-05,
      "loss": 3.4229,
      "step": 2
    },
    {
      "epoch": 0.0014808416116492873,
      "grad_norm": 8.572405815124512,
      "learning_rate": 0.0001,
      "loss": 3.2758,
      "step": 3
    },
    {
      "epoch": 0.0019744554821990497,
      "grad_norm": 3.8493947982788086,
      "learning_rate": 9.997376600647783e-05,
      "loss": 2.3979,
      "step": 4
    },
    {
      "epoch": 0.002468069352748812,
      "grad_norm": 2.470167398452759,
      "learning_rate": 9.989509461357426e-05,
      "loss": 2.0289,
      "step": 5
    },
    {
      "epoch": 0.0029616832232985746,
      "grad_norm": 1.9393905401229858,
      "learning_rate": 9.976407754861426e-05,
      "loss": 2.2227,
      "step": 6
    },
    {
      "epoch": 0.003455297093848337,
      "grad_norm": 1.9276494979858398,
      "learning_rate": 9.958086757163489e-05,
      "loss": 1.9944,
      "step": 7
    },
    {
      "epoch": 0.0039489109643980995,
      "grad_norm": 1.5310888290405273,
      "learning_rate": 9.934567829727386e-05,
      "loss": 1.7675,
      "step": 8
    },
    {
      "epoch": 0.004442524834947862,
      "grad_norm": 1.3651423454284668,
      "learning_rate": 9.905878394570453e-05,
      "loss": 1.8709,
      "step": 9
    },
    {
      "epoch": 0.004936138705497624,
      "grad_norm": 1.3343584537506104,
      "learning_rate": 9.872051902290737e-05,
      "loss": 1.7253,
      "step": 10
    },
    {
      "epoch": 0.005429752576047387,
      "grad_norm": 1.2432461977005005,
      "learning_rate": 9.833127793065098e-05,
      "loss": 1.8579,
      "step": 11
    },
    {
      "epoch": 0.005923366446597149,
      "grad_norm": 1.0454318523406982,
      "learning_rate": 9.789151450663723e-05,
      "loss": 1.7956,
      "step": 12
    },
    {
      "epoch": 0.006416980317146912,
      "grad_norm": 1.0827058553695679,
      "learning_rate": 9.740174149534693e-05,
      "loss": 1.6771,
      "step": 13
    },
    {
      "epoch": 0.006910594187696674,
      "grad_norm": 0.9257851839065552,
      "learning_rate": 9.686252995020249e-05,
      "loss": 1.7253,
      "step": 14
    },
    {
      "epoch": 0.007404208058246437,
      "grad_norm": 1.018771767616272,
      "learning_rate": 9.627450856774539e-05,
      "loss": 1.708,
      "step": 15
    },
    {
      "epoch": 0.007897821928796199,
      "grad_norm": 0.8964956402778625,
      "learning_rate": 9.563836295460398e-05,
      "loss": 1.6949,
      "step": 16
    },
    {
      "epoch": 0.008391435799345962,
      "grad_norm": 0.9759191870689392,
      "learning_rate": 9.495483482810688e-05,
      "loss": 1.5602,
      "step": 17
    },
    {
      "epoch": 0.008885049669895725,
      "grad_norm": 1.0305898189544678,
      "learning_rate": 9.422472115147382e-05,
      "loss": 1.653,
      "step": 18
    },
    {
      "epoch": 0.009378663540445486,
      "grad_norm": 0.9951354265213013,
      "learning_rate": 9.3448873204592e-05,
      "loss": 1.787,
      "step": 19
    },
    {
      "epoch": 0.009872277410995249,
      "grad_norm": 0.8011742830276489,
      "learning_rate": 9.2628195591462e-05,
      "loss": 1.639,
      "step": 20
    },
    {
      "epoch": 0.010365891281545012,
      "grad_norm": 0.8728142976760864,
      "learning_rate": 9.176364518546989e-05,
      "loss": 1.6791,
      "step": 21
    },
    {
      "epoch": 0.010859505152094774,
      "grad_norm": 0.8905225396156311,
      "learning_rate": 9.08562300137157e-05,
      "loss": 1.5861,
      "step": 22
    },
    {
      "epoch": 0.011353119022644536,
      "grad_norm": 0.8705262541770935,
      "learning_rate": 8.990700808169889e-05,
      "loss": 1.4812,
      "step": 23
    },
    {
      "epoch": 0.011846732893194298,
      "grad_norm": 0.8652294874191284,
      "learning_rate": 8.891708613973126e-05,
      "loss": 1.5792,
      "step": 24
    },
    {
      "epoch": 0.012340346763744061,
      "grad_norm": 0.916961133480072,
      "learning_rate": 8.788761839251559e-05,
      "loss": 1.4967,
      "step": 25
    },
    {
      "epoch": 0.012340346763744061,
      "eval_loss": 1.6187465190887451,
      "eval_runtime": 2.171,
      "eval_samples_per_second": 23.03,
      "eval_steps_per_second": 5.988,
      "step": 25
    },
    {
      "epoch": 0.012833960634293824,
      "grad_norm": 0.9111001491546631,
      "learning_rate": 8.681980515339464e-05,
      "loss": 1.643,
      "step": 26
    },
    {
      "epoch": 0.013327574504843585,
      "grad_norm": 0.869102418422699,
      "learning_rate": 8.571489144483944e-05,
      "loss": 1.6734,
      "step": 27
    },
    {
      "epoch": 0.013821188375393348,
      "grad_norm": 0.842080295085907,
      "learning_rate": 8.457416554680877e-05,
      "loss": 1.476,
      "step": 28
    },
    {
      "epoch": 0.014314802245943111,
      "grad_norm": 0.835245668888092,
      "learning_rate": 8.339895749467238e-05,
      "loss": 1.6803,
      "step": 29
    },
    {
      "epoch": 0.014808416116492874,
      "grad_norm": 0.7700492739677429,
      "learning_rate": 8.219063752844926e-05,
      "loss": 1.4141,
      "step": 30
    },
    {
      "epoch": 0.015302029987042635,
      "grad_norm": 0.8414303064346313,
      "learning_rate": 8.095061449516903e-05,
      "loss": 1.5424,
      "step": 31
    },
    {
      "epoch": 0.015795643857592398,
      "grad_norm": 0.8434069752693176,
      "learning_rate": 7.968033420621935e-05,
      "loss": 1.5554,
      "step": 32
    },
    {
      "epoch": 0.01628925772814216,
      "grad_norm": 0.8102946877479553,
      "learning_rate": 7.838127775159452e-05,
      "loss": 1.5322,
      "step": 33
    },
    {
      "epoch": 0.016782871598691924,
      "grad_norm": 0.777534544467926,
      "learning_rate": 7.705495977301078e-05,
      "loss": 1.5407,
      "step": 34
    },
    {
      "epoch": 0.017276485469241685,
      "grad_norm": 0.8392916321754456,
      "learning_rate": 7.570292669790186e-05,
      "loss": 1.5694,
      "step": 35
    },
    {
      "epoch": 0.01777009933979145,
      "grad_norm": 0.8198193907737732,
      "learning_rate": 7.43267549363537e-05,
      "loss": 1.5633,
      "step": 36
    },
    {
      "epoch": 0.01826371321034121,
      "grad_norm": 0.8337418437004089,
      "learning_rate": 7.292804904308087e-05,
      "loss": 1.563,
      "step": 37
    },
    {
      "epoch": 0.01875732708089097,
      "grad_norm": 0.7560647130012512,
      "learning_rate": 7.150843984658754e-05,
      "loss": 1.4525,
      "step": 38
    },
    {
      "epoch": 0.019250940951440736,
      "grad_norm": 0.7717271447181702,
      "learning_rate": 7.006958254769438e-05,
      "loss": 1.5498,
      "step": 39
    },
    {
      "epoch": 0.019744554821990497,
      "grad_norm": 0.7649319767951965,
      "learning_rate": 6.861315478964841e-05,
      "loss": 1.529,
      "step": 40
    },
    {
      "epoch": 0.020238168692540262,
      "grad_norm": 0.747653067111969,
      "learning_rate": 6.714085470206609e-05,
      "loss": 1.388,
      "step": 41
    },
    {
      "epoch": 0.020731782563090023,
      "grad_norm": 0.7593997120857239,
      "learning_rate": 6.56543989209901e-05,
      "loss": 1.4656,
      "step": 42
    },
    {
      "epoch": 0.021225396433639784,
      "grad_norm": 0.7581989765167236,
      "learning_rate": 6.415552058736854e-05,
      "loss": 1.5301,
      "step": 43
    },
    {
      "epoch": 0.02171901030418955,
      "grad_norm": 0.8096319437026978,
      "learning_rate": 6.264596732629e-05,
      "loss": 1.5115,
      "step": 44
    },
    {
      "epoch": 0.02221262417473931,
      "grad_norm": 0.8088666200637817,
      "learning_rate": 6.112749920933111e-05,
      "loss": 1.5176,
      "step": 45
    },
    {
      "epoch": 0.02270623804528907,
      "grad_norm": 0.7563244700431824,
      "learning_rate": 5.960188670239154e-05,
      "loss": 1.3766,
      "step": 46
    },
    {
      "epoch": 0.023199851915838836,
      "grad_norm": 0.8245486617088318,
      "learning_rate": 5.80709086014102e-05,
      "loss": 1.4407,
      "step": 47
    },
    {
      "epoch": 0.023693465786388597,
      "grad_norm": 0.8189464807510376,
      "learning_rate": 5.653634995836856e-05,
      "loss": 1.4207,
      "step": 48
    },
    {
      "epoch": 0.02418707965693836,
      "grad_norm": 0.86505126953125,
      "learning_rate": 5.500000000000001e-05,
      "loss": 1.5054,
      "step": 49
    },
    {
      "epoch": 0.024680693527488123,
      "grad_norm": 0.9769317507743835,
      "learning_rate": 5.346365004163145e-05,
      "loss": 1.505,
      "step": 50
    },
    {
      "epoch": 0.024680693527488123,
      "eval_loss": 1.558501124382019,
      "eval_runtime": 2.151,
      "eval_samples_per_second": 23.245,
      "eval_steps_per_second": 6.044,
      "step": 50
    },
    {
      "epoch": 0.025174307398037884,
      "grad_norm": 0.7213638424873352,
      "learning_rate": 5.192909139858981e-05,
      "loss": 1.411,
      "step": 51
    },
    {
      "epoch": 0.02566792126858765,
      "grad_norm": 0.7571468949317932,
      "learning_rate": 5.0398113297608465e-05,
      "loss": 1.5454,
      "step": 52
    },
    {
      "epoch": 0.02616153513913741,
      "grad_norm": 0.717697024345398,
      "learning_rate": 4.887250079066892e-05,
      "loss": 1.4686,
      "step": 53
    },
    {
      "epoch": 0.02665514900968717,
      "grad_norm": 0.7320601344108582,
      "learning_rate": 4.7354032673710005e-05,
      "loss": 1.4823,
      "step": 54
    },
    {
      "epoch": 0.027148762880236935,
      "grad_norm": 0.7738489508628845,
      "learning_rate": 4.584447941263149e-05,
      "loss": 1.4721,
      "step": 55
    },
    {
      "epoch": 0.027642376750786696,
      "grad_norm": 0.7395352721214294,
      "learning_rate": 4.43456010790099e-05,
      "loss": 1.5485,
      "step": 56
    },
    {
      "epoch": 0.02813599062133646,
      "grad_norm": 0.7061799764633179,
      "learning_rate": 4.285914529793391e-05,
      "loss": 1.4671,
      "step": 57
    },
    {
      "epoch": 0.028629604491886222,
      "grad_norm": 0.7371093034744263,
      "learning_rate": 4.13868452103516e-05,
      "loss": 1.5273,
      "step": 58
    },
    {
      "epoch": 0.029123218362435983,
      "grad_norm": 0.7502743005752563,
      "learning_rate": 3.9930417452305626e-05,
      "loss": 1.5082,
      "step": 59
    },
    {
      "epoch": 0.029616832232985748,
      "grad_norm": 0.7478111982345581,
      "learning_rate": 3.8491560153412466e-05,
      "loss": 1.4492,
      "step": 60
    },
    {
      "epoch": 0.03011044610353551,
      "grad_norm": 0.7804875373840332,
      "learning_rate": 3.707195095691913e-05,
      "loss": 1.5395,
      "step": 61
    },
    {
      "epoch": 0.03060405997408527,
      "grad_norm": 0.828667938709259,
      "learning_rate": 3.567324506364632e-05,
      "loss": 1.4686,
      "step": 62
    },
    {
      "epoch": 0.031097673844635035,
      "grad_norm": 0.7223063111305237,
      "learning_rate": 3.4297073302098156e-05,
      "loss": 1.4567,
      "step": 63
    },
    {
      "epoch": 0.031591287715184796,
      "grad_norm": 0.8763315677642822,
      "learning_rate": 3.2945040226989244e-05,
      "loss": 1.5132,
      "step": 64
    },
    {
      "epoch": 0.03208490158573456,
      "grad_norm": 0.8217432498931885,
      "learning_rate": 3.16187222484055e-05,
      "loss": 1.4729,
      "step": 65
    },
    {
      "epoch": 0.03257851545628432,
      "grad_norm": 0.7187386155128479,
      "learning_rate": 3.0319665793780648e-05,
      "loss": 1.4075,
      "step": 66
    },
    {
      "epoch": 0.033072129326834086,
      "grad_norm": 0.7990232110023499,
      "learning_rate": 2.9049385504830985e-05,
      "loss": 1.6426,
      "step": 67
    },
    {
      "epoch": 0.03356574319738385,
      "grad_norm": 0.7265430688858032,
      "learning_rate": 2.7809362471550748e-05,
      "loss": 1.4177,
      "step": 68
    },
    {
      "epoch": 0.03405935706793361,
      "grad_norm": 0.7410502433776855,
      "learning_rate": 2.660104250532764e-05,
      "loss": 1.5169,
      "step": 69
    },
    {
      "epoch": 0.03455297093848337,
      "grad_norm": 0.8333743214607239,
      "learning_rate": 2.5425834453191232e-05,
      "loss": 1.5395,
      "step": 70
    },
    {
      "epoch": 0.03504658480903313,
      "grad_norm": 0.7417758703231812,
      "learning_rate": 2.4285108555160577e-05,
      "loss": 1.4021,
      "step": 71
    },
    {
      "epoch": 0.0355401986795829,
      "grad_norm": 0.757122814655304,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 1.5029,
      "step": 72
    },
    {
      "epoch": 0.03603381255013266,
      "grad_norm": 0.8034988045692444,
      "learning_rate": 2.2112381607484417e-05,
      "loss": 1.5196,
      "step": 73
    },
    {
      "epoch": 0.03652742642068242,
      "grad_norm": 0.8066292405128479,
      "learning_rate": 2.1082913860268765e-05,
      "loss": 1.4421,
      "step": 74
    },
    {
      "epoch": 0.03702104029123218,
      "grad_norm": 0.9166308045387268,
      "learning_rate": 2.0092991918301108e-05,
      "loss": 1.5344,
      "step": 75
    },
    {
      "epoch": 0.03702104029123218,
      "eval_loss": 1.517699122428894,
      "eval_runtime": 2.1573,
      "eval_samples_per_second": 23.177,
      "eval_steps_per_second": 6.026,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 95,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.43721341763584e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}