{
  "best_metric": 0.44097796082496643,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.026441036488630356,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0005288207297726071,
      "grad_norm": 3.079803705215454,
      "learning_rate": 0.0001,
      "loss": 2.107,
      "step": 1
    },
    {
      "epoch": 0.0005288207297726071,
      "eval_loss": 3.4710946083068848,
      "eval_runtime": 279.4296,
      "eval_samples_per_second": 2.852,
      "eval_steps_per_second": 1.428,
      "step": 1
    },
    {
      "epoch": 0.0010576414595452142,
      "grad_norm": 3.9500977993011475,
      "learning_rate": 0.0002,
      "loss": 2.2537,
      "step": 2
    },
    {
      "epoch": 0.0015864621893178213,
      "grad_norm": 3.463766574859619,
      "learning_rate": 0.00019978589232386035,
      "loss": 2.084,
      "step": 3
    },
    {
      "epoch": 0.0021152829190904283,
      "grad_norm": 3.929175853729248,
      "learning_rate": 0.00019914448613738106,
      "loss": 1.825,
      "step": 4
    },
    {
      "epoch": 0.0026441036488630354,
      "grad_norm": 3.6939759254455566,
      "learning_rate": 0.00019807852804032305,
      "loss": 1.2347,
      "step": 5
    },
    {
      "epoch": 0.0031729243786356425,
      "grad_norm": 3.8677520751953125,
      "learning_rate": 0.00019659258262890683,
      "loss": 1.2043,
      "step": 6
    },
    {
      "epoch": 0.0037017451084082496,
      "grad_norm": 2.874622344970703,
      "learning_rate": 0.0001946930129495106,
      "loss": 0.8201,
      "step": 7
    },
    {
      "epoch": 0.004230565838180857,
      "grad_norm": 2.879485607147217,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.8926,
      "step": 8
    },
    {
      "epoch": 0.004759386567953464,
      "grad_norm": 4.38440465927124,
      "learning_rate": 0.00018968727415326884,
      "loss": 0.8809,
      "step": 9
    },
    {
      "epoch": 0.005288207297726071,
      "grad_norm": 3.197903871536255,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.6542,
      "step": 10
    },
    {
      "epoch": 0.005817028027498678,
      "grad_norm": 2.5063633918762207,
      "learning_rate": 0.00018314696123025454,
      "loss": 0.5457,
      "step": 11
    },
    {
      "epoch": 0.006345848757271285,
      "grad_norm": 2.595250129699707,
      "learning_rate": 0.00017933533402912354,
      "loss": 0.4891,
      "step": 12
    },
    {
      "epoch": 0.006874669487043892,
      "grad_norm": 3.3837358951568604,
      "learning_rate": 0.00017518398074789775,
      "loss": 0.8347,
      "step": 13
    },
    {
      "epoch": 0.007403490216816499,
      "grad_norm": 2.079376220703125,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.4495,
      "step": 14
    },
    {
      "epoch": 0.007932310946589107,
      "grad_norm": 2.317464828491211,
      "learning_rate": 0.00016593458151000688,
      "loss": 0.6247,
      "step": 15
    },
    {
      "epoch": 0.008461131676361713,
      "grad_norm": 2.3306057453155518,
      "learning_rate": 0.00016087614290087208,
      "loss": 0.3897,
      "step": 16
    },
    {
      "epoch": 0.008989952406134321,
      "grad_norm": 2.1108222007751465,
      "learning_rate": 0.00015555702330196023,
      "loss": 0.5025,
      "step": 17
    },
    {
      "epoch": 0.009518773135906928,
      "grad_norm": 3.1157798767089844,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.7051,
      "step": 18
    },
    {
      "epoch": 0.010047593865679535,
      "grad_norm": 2.5793957710266113,
      "learning_rate": 0.00014422886902190014,
      "loss": 0.6203,
      "step": 19
    },
    {
      "epoch": 0.010576414595452142,
      "grad_norm": 1.315252661705017,
      "learning_rate": 0.000138268343236509,
      "loss": 0.3476,
      "step": 20
    },
    {
      "epoch": 0.01110523532522475,
      "grad_norm": 1.7388885021209717,
      "learning_rate": 0.00013214394653031616,
      "loss": 0.4823,
      "step": 21
    },
    {
      "epoch": 0.011634056054997356,
      "grad_norm": 2.825782060623169,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.5343,
      "step": 22
    },
    {
      "epoch": 0.012162876784769964,
      "grad_norm": 2.279564619064331,
      "learning_rate": 0.00011950903220161285,
      "loss": 0.3226,
      "step": 23
    },
    {
      "epoch": 0.01269169751454257,
      "grad_norm": 1.5213063955307007,
      "learning_rate": 0.00011305261922200519,
      "loss": 0.2932,
      "step": 24
    },
    {
      "epoch": 0.013220518244315178,
      "grad_norm": 2.1449546813964844,
      "learning_rate": 0.00010654031292301432,
      "loss": 0.3167,
      "step": 25
    },
    {
      "epoch": 0.013220518244315178,
      "eval_loss": 0.49429598450660706,
      "eval_runtime": 280.2863,
      "eval_samples_per_second": 2.844,
      "eval_steps_per_second": 1.424,
      "step": 25
    },
    {
      "epoch": 0.013749338974087784,
      "grad_norm": 2.0070135593414307,
      "learning_rate": 0.0001,
      "loss": 0.3063,
      "step": 26
    },
    {
      "epoch": 0.014278159703860392,
      "grad_norm": 2.7425220012664795,
      "learning_rate": 9.345968707698569e-05,
      "loss": 0.5191,
      "step": 27
    },
    {
      "epoch": 0.014806980433632998,
      "grad_norm": 1.8991012573242188,
      "learning_rate": 8.694738077799488e-05,
      "loss": 0.4107,
      "step": 28
    },
    {
      "epoch": 0.015335801163405606,
      "grad_norm": 2.079669237136841,
      "learning_rate": 8.049096779838719e-05,
      "loss": 0.575,
      "step": 29
    },
    {
      "epoch": 0.015864621893178214,
      "grad_norm": 1.5450035333633423,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.285,
      "step": 30
    },
    {
      "epoch": 0.01639344262295082,
      "grad_norm": 1.7861802577972412,
      "learning_rate": 6.785605346968386e-05,
      "loss": 0.3807,
      "step": 31
    },
    {
      "epoch": 0.016922263352723427,
      "grad_norm": 1.9254704713821411,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.4574,
      "step": 32
    },
    {
      "epoch": 0.017451084082496033,
      "grad_norm": 2.295936107635498,
      "learning_rate": 5.577113097809989e-05,
      "loss": 0.4582,
      "step": 33
    },
    {
      "epoch": 0.017979904812268643,
      "grad_norm": 2.942246437072754,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.5926,
      "step": 34
    },
    {
      "epoch": 0.01850872554204125,
      "grad_norm": 1.30338454246521,
      "learning_rate": 4.444297669803981e-05,
      "loss": 0.2572,
      "step": 35
    },
    {
      "epoch": 0.019037546271813855,
      "grad_norm": 2.0632126331329346,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 0.4064,
      "step": 36
    },
    {
      "epoch": 0.01956636700158646,
      "grad_norm": 1.9891884326934814,
      "learning_rate": 3.406541848999312e-05,
      "loss": 0.397,
      "step": 37
    },
    {
      "epoch": 0.02009518773135907,
      "grad_norm": 1.516108751296997,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.2141,
      "step": 38
    },
    {
      "epoch": 0.020624008461131677,
      "grad_norm": 2.0183143615722656,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 0.2988,
      "step": 39
    },
    {
      "epoch": 0.021152829190904283,
      "grad_norm": 1.2690092325210571,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 0.3371,
      "step": 40
    },
    {
      "epoch": 0.02168164992067689,
      "grad_norm": 1.9884178638458252,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 0.3387,
      "step": 41
    },
    {
      "epoch": 0.0222104706504495,
      "grad_norm": 1.528070092201233,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.275,
      "step": 42
    },
    {
      "epoch": 0.022739291380222106,
      "grad_norm": 1.3429299592971802,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 0.2422,
      "step": 43
    },
    {
      "epoch": 0.023268112109994712,
      "grad_norm": 2.5254251956939697,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.4689,
      "step": 44
    },
    {
      "epoch": 0.023796932839767318,
      "grad_norm": 1.6032429933547974,
      "learning_rate": 5.306987050489442e-06,
      "loss": 0.371,
      "step": 45
    },
    {
      "epoch": 0.024325753569539928,
      "grad_norm": 1.51561439037323,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.1822,
      "step": 46
    },
    {
      "epoch": 0.024854574299312534,
      "grad_norm": 2.3901467323303223,
      "learning_rate": 1.921471959676957e-06,
      "loss": 0.3474,
      "step": 47
    },
    {
      "epoch": 0.02538339502908514,
      "grad_norm": 1.7008579969406128,
      "learning_rate": 8.555138626189618e-07,
      "loss": 0.2807,
      "step": 48
    },
    {
      "epoch": 0.025912215758857746,
      "grad_norm": 5.191266059875488,
      "learning_rate": 2.141076761396521e-07,
      "loss": 0.5048,
      "step": 49
    },
    {
      "epoch": 0.026441036488630356,
      "grad_norm": 2.155629873275757,
      "learning_rate": 0.0,
      "loss": 0.4876,
      "step": 50
    },
    {
      "epoch": 0.026441036488630356,
      "eval_loss": 0.44097796082496643,
      "eval_runtime": 280.5295,
      "eval_samples_per_second": 2.841,
      "eval_steps_per_second": 1.422,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.41887283560448e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}