|
{
  "best_metric": 0.28519564867019653,
  "best_model_checkpoint": "miner_id_24/checkpoint-500",
  "epoch": 0.015524816419045845,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.104963283809169e-05,
      "eval_loss": 3.72892689704895,
      "eval_runtime": 1163.8988,
      "eval_samples_per_second": 11.651,
      "eval_steps_per_second": 2.913,
      "step": 1
    },
    {
      "epoch": 0.0003104963283809169,
      "grad_norm": 3.2471072673797607,
      "learning_rate": 4.2000000000000004e-05,
      "loss": 3.1999,
      "step": 10
    },
    {
      "epoch": 0.0006209926567618338,
      "grad_norm": 4.48224401473999,
      "learning_rate": 8.400000000000001e-05,
      "loss": 2.466,
      "step": 20
    },
    {
      "epoch": 0.0009314889851427506,
      "grad_norm": 4.198734283447266,
      "learning_rate": 0.000126,
      "loss": 0.818,
      "step": 30
    },
    {
      "epoch": 0.0012419853135236677,
      "grad_norm": 0.7716361284255981,
      "learning_rate": 0.00016800000000000002,
      "loss": 0.3401,
      "step": 40
    },
    {
      "epoch": 0.0015524816419045845,
      "grad_norm": 0.675552248954773,
      "learning_rate": 0.00021,
      "loss": 0.2693,
      "step": 50
    },
    {
      "epoch": 0.0015524816419045845,
      "eval_loss": 0.39680594205856323,
      "eval_runtime": 1162.3895,
      "eval_samples_per_second": 11.666,
      "eval_steps_per_second": 2.917,
      "step": 50
    },
    {
      "epoch": 0.0018629779702855013,
      "grad_norm": 1.1730502843856812,
      "learning_rate": 0.00020974422527728155,
      "loss": 0.3794,
      "step": 60
    },
    {
      "epoch": 0.0021734742986664183,
      "grad_norm": 2.169248580932617,
      "learning_rate": 0.0002089781472178649,
      "loss": 0.3869,
      "step": 70
    },
    {
      "epoch": 0.0024839706270473353,
      "grad_norm": 14.912446022033691,
      "learning_rate": 0.0002077054980770496,
      "loss": 0.3622,
      "step": 80
    },
    {
      "epoch": 0.002794466955428252,
      "grad_norm": 0.8610610961914062,
      "learning_rate": 0.00020593247807352348,
      "loss": 0.3092,
      "step": 90
    },
    {
      "epoch": 0.003104963283809169,
      "grad_norm": 0.7043868899345398,
      "learning_rate": 0.00020366772518252038,
      "loss": 0.3479,
      "step": 100
    },
    {
      "epoch": 0.003104963283809169,
      "eval_loss": 0.3086857795715332,
      "eval_runtime": 1161.3888,
      "eval_samples_per_second": 11.677,
      "eval_steps_per_second": 2.92,
      "step": 100
    },
    {
      "epoch": 0.003415459612190086,
      "grad_norm": 0.526524007320404,
      "learning_rate": 0.0002009222730524731,
      "loss": 0.2852,
      "step": 110
    },
    {
      "epoch": 0.0037259559405710026,
      "grad_norm": 2.453948974609375,
      "learning_rate": 0.00019770949725018733,
      "loss": 0.3807,
      "step": 120
    },
    {
      "epoch": 0.00403645226895192,
      "grad_norm": 1.1369106769561768,
      "learning_rate": 0.00019404505009642473,
      "loss": 0.4655,
      "step": 130
    },
    {
      "epoch": 0.004346948597332837,
      "grad_norm": 0.5654745101928711,
      "learning_rate": 0.0001899467844093695,
      "loss": 0.3161,
      "step": 140
    },
    {
      "epoch": 0.004657444925713753,
      "grad_norm": 0.7114362716674805,
      "learning_rate": 0.00018543466652749268,
      "loss": 0.2704,
      "step": 150
    },
    {
      "epoch": 0.004657444925713753,
      "eval_loss": 0.33749160170555115,
      "eval_runtime": 1163.9994,
      "eval_samples_per_second": 11.65,
      "eval_steps_per_second": 2.913,
      "step": 150
    },
    {
      "epoch": 0.004967941254094671,
      "grad_norm": 0.620780885219574,
      "learning_rate": 0.00018053067903555837,
      "loss": 0.333,
      "step": 160
    },
    {
      "epoch": 0.005278437582475587,
      "grad_norm": 14.071165084838867,
      "learning_rate": 0.00017525871366768012,
      "loss": 0.3744,
      "step": 170
    },
    {
      "epoch": 0.005588933910856504,
      "grad_norm": 0.7737940549850464,
      "learning_rate": 0.00016964445490919413,
      "loss": 0.3283,
      "step": 180
    },
    {
      "epoch": 0.005899430239237421,
      "grad_norm": 0.687343955039978,
      "learning_rate": 0.00016371525486442843,
      "loss": 0.3368,
      "step": 190
    },
    {
      "epoch": 0.006209926567618338,
      "grad_norm": 0.7471249103546143,
      "learning_rate": 0.0001575,
      "loss": 0.2711,
      "step": 200
    },
    {
      "epoch": 0.006209926567618338,
      "eval_loss": 0.3042045831680298,
      "eval_runtime": 1163.7747,
      "eval_samples_per_second": 11.653,
      "eval_steps_per_second": 2.914,
      "step": 200
    },
    {
      "epoch": 0.0065204228959992545,
      "grad_norm": 0.6126402616500854,
      "learning_rate": 0.00015102897041285315,
      "loss": 0.3165,
      "step": 210
    },
    {
      "epoch": 0.006830919224380172,
      "grad_norm": 3.4305641651153564,
      "learning_rate": 0.00014433369230867077,
      "loss": 0.2982,
      "step": 220
    },
    {
      "epoch": 0.0071414155527610886,
      "grad_norm": 4.1259589195251465,
      "learning_rate": 0.0001374467844093695,
      "loss": 0.4701,
      "step": 230
    },
    {
      "epoch": 0.007451911881142005,
      "grad_norm": 0.6386480331420898,
      "learning_rate": 0.0001304017990379651,
      "loss": 0.3251,
      "step": 240
    },
    {
      "epoch": 0.007762408209522923,
      "grad_norm": 0.4939100742340088,
      "learning_rate": 0.0001232330586550277,
      "loss": 0.2631,
      "step": 250
    },
    {
      "epoch": 0.007762408209522923,
      "eval_loss": 0.293373167514801,
      "eval_runtime": 1164.2178,
      "eval_samples_per_second": 11.648,
      "eval_steps_per_second": 2.913,
      "step": 250
    },
    {
      "epoch": 0.00807290453790384,
      "grad_norm": 0.7074109315872192,
      "learning_rate": 0.00011597548864310363,
      "loss": 0.2975,
      "step": 260
    },
    {
      "epoch": 0.008383400866284757,
      "grad_norm": 1.0438284873962402,
      "learning_rate": 0.00010866444715376263,
      "loss": 0.2717,
      "step": 270
    },
    {
      "epoch": 0.008693897194665673,
      "grad_norm": 0.7922680974006653,
      "learning_rate": 0.00010133555284623744,
      "loss": 0.4675,
      "step": 280
    },
    {
      "epoch": 0.00900439352304659,
      "grad_norm": 0.6056926846504211,
      "learning_rate": 9.402451135689641e-05,
      "loss": 0.314,
      "step": 290
    },
    {
      "epoch": 0.009314889851427506,
      "grad_norm": 1.8034029006958008,
      "learning_rate": 8.676694134497232e-05,
      "loss": 0.2882,
      "step": 300
    },
    {
      "epoch": 0.009314889851427506,
      "eval_loss": 0.29820531606674194,
      "eval_runtime": 1164.5486,
      "eval_samples_per_second": 11.645,
      "eval_steps_per_second": 2.912,
      "step": 300
    },
    {
      "epoch": 0.009625386179808423,
      "grad_norm": 0.48237577080726624,
      "learning_rate": 7.95982009620349e-05,
      "loss": 0.3016,
      "step": 310
    },
    {
      "epoch": 0.009935882508189341,
      "grad_norm": 0.5947824120521545,
      "learning_rate": 7.255321559063053e-05,
      "loss": 0.2607,
      "step": 320
    },
    {
      "epoch": 0.010246378836570258,
      "grad_norm": 1.112855076789856,
      "learning_rate": 6.566630769132923e-05,
      "loss": 0.4451,
      "step": 330
    },
    {
      "epoch": 0.010556875164951175,
      "grad_norm": 0.5384666919708252,
      "learning_rate": 5.897102958714686e-05,
      "loss": 0.2777,
      "step": 340
    },
    {
      "epoch": 0.010867371493332091,
      "grad_norm": 0.3258765935897827,
      "learning_rate": 5.250000000000002e-05,
      "loss": 0.2759,
      "step": 350
    },
    {
      "epoch": 0.010867371493332091,
      "eval_loss": 0.2902805805206299,
      "eval_runtime": 1164.9711,
      "eval_samples_per_second": 11.641,
      "eval_steps_per_second": 2.911,
      "step": 350
    },
    {
      "epoch": 0.011177867821713008,
      "grad_norm": 0.6546416282653809,
      "learning_rate": 4.62847451355716e-05,
      "loss": 0.3157,
      "step": 360
    },
    {
      "epoch": 0.011488364150093924,
      "grad_norm": 0.703099250793457,
      "learning_rate": 4.035554509080588e-05,
      "loss": 0.2672,
      "step": 370
    },
    {
      "epoch": 0.011798860478474843,
      "grad_norm": 1.0945324897766113,
      "learning_rate": 3.474128633231992e-05,
      "loss": 0.3675,
      "step": 380
    },
    {
      "epoch": 0.01210935680685576,
      "grad_norm": 0.3610471785068512,
      "learning_rate": 2.946932096444165e-05,
      "loss": 0.2548,
      "step": 390
    },
    {
      "epoch": 0.012419853135236676,
      "grad_norm": 0.3258683383464813,
      "learning_rate": 2.456533347250732e-05,
      "loss": 0.2487,
      "step": 400
    },
    {
      "epoch": 0.012419853135236676,
      "eval_loss": 0.2859453856945038,
      "eval_runtime": 1164.1855,
      "eval_samples_per_second": 11.648,
      "eval_steps_per_second": 2.913,
      "step": 400
    },
    {
      "epoch": 0.012730349463617592,
      "grad_norm": 0.45075902342796326,
      "learning_rate": 2.005321559063053e-05,
      "loss": 0.2686,
      "step": 410
    },
    {
      "epoch": 0.013040845791998509,
      "grad_norm": 0.568687379360199,
      "learning_rate": 1.5954949903575276e-05,
      "loss": 0.2879,
      "step": 420
    },
    {
      "epoch": 0.013351342120379427,
      "grad_norm": 1.1055338382720947,
      "learning_rate": 1.2290502749812666e-05,
      "loss": 0.3734,
      "step": 430
    },
    {
      "epoch": 0.013661838448760344,
      "grad_norm": 0.2285093367099762,
      "learning_rate": 9.077726947526898e-06,
      "loss": 0.2876,
      "step": 440
    },
    {
      "epoch": 0.01397233477714126,
      "grad_norm": 0.41890841722488403,
      "learning_rate": 6.332274817479627e-06,
      "loss": 0.2627,
      "step": 450
    },
    {
      "epoch": 0.01397233477714126,
      "eval_loss": 0.28539028763771057,
      "eval_runtime": 1163.9333,
      "eval_samples_per_second": 11.651,
      "eval_steps_per_second": 2.913,
      "step": 450
    },
    {
      "epoch": 0.014282831105522177,
      "grad_norm": 0.9566968083381653,
      "learning_rate": 4.067521926476516e-06,
      "loss": 0.291,
      "step": 460
    },
    {
      "epoch": 0.014593327433903094,
      "grad_norm": 0.5413401126861572,
      "learning_rate": 2.294501922950403e-06,
      "loss": 0.2643,
      "step": 470
    },
    {
      "epoch": 0.01490382376228401,
      "grad_norm": 0.8163195252418518,
      "learning_rate": 1.021852782135112e-06,
      "loss": 0.3415,
      "step": 480
    },
    {
      "epoch": 0.015214320090664929,
      "grad_norm": 0.27404096722602844,
      "learning_rate": 2.5577472271845927e-07,
      "loss": 0.295,
      "step": 490
    },
    {
      "epoch": 0.015524816419045845,
      "grad_norm": 0.3744218945503235,
      "learning_rate": 0.0,
      "loss": 0.2789,
      "step": 500
    },
    {
      "epoch": 0.015524816419045845,
      "eval_loss": 0.28519564867019653,
      "eval_runtime": 1163.2315,
      "eval_samples_per_second": 11.658,
      "eval_steps_per_second": 2.915,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3972035882870374e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|