{
  "best_metric": 0.16282150149345398,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.03711952487008166,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007423904974016332,
      "grad_norm": 0.6487741470336914,
      "learning_rate": 0.0001,
      "loss": 0.1482,
      "step": 1
    },
    {
      "epoch": 0.0007423904974016332,
      "eval_loss": 2.591169834136963,
      "eval_runtime": 197.2852,
      "eval_samples_per_second": 2.879,
      "eval_steps_per_second": 1.44,
      "step": 1
    },
    {
      "epoch": 0.0014847809948032665,
      "grad_norm": 2.3651726245880127,
      "learning_rate": 0.0002,
      "loss": 0.4163,
      "step": 2
    },
    {
      "epoch": 0.0022271714922048997,
      "grad_norm": 1.3198089599609375,
      "learning_rate": 0.00019978589232386035,
      "loss": 0.3814,
      "step": 3
    },
    {
      "epoch": 0.002969561989606533,
      "grad_norm": 2.1703317165374756,
      "learning_rate": 0.00019914448613738106,
      "loss": 0.2471,
      "step": 4
    },
    {
      "epoch": 0.003711952487008166,
      "grad_norm": 1.6247034072875977,
      "learning_rate": 0.00019807852804032305,
      "loss": 0.1354,
      "step": 5
    },
    {
      "epoch": 0.004454342984409799,
      "grad_norm": 2.12754487991333,
      "learning_rate": 0.00019659258262890683,
      "loss": 0.1925,
      "step": 6
    },
    {
      "epoch": 0.005196733481811433,
      "grad_norm": 1.0829967260360718,
      "learning_rate": 0.0001946930129495106,
      "loss": 0.1403,
      "step": 7
    },
    {
      "epoch": 0.005939123979213066,
      "grad_norm": 1.0773605108261108,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.1134,
      "step": 8
    },
    {
      "epoch": 0.0066815144766146995,
      "grad_norm": 0.8257158398628235,
      "learning_rate": 0.00018968727415326884,
      "loss": 0.0786,
      "step": 9
    },
    {
      "epoch": 0.007423904974016332,
      "grad_norm": 0.5242513418197632,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.0609,
      "step": 10
    },
    {
      "epoch": 0.008166295471417966,
      "grad_norm": 1.4674206972122192,
      "learning_rate": 0.00018314696123025454,
      "loss": 0.0633,
      "step": 11
    },
    {
      "epoch": 0.008908685968819599,
      "grad_norm": 1.7347350120544434,
      "learning_rate": 0.00017933533402912354,
      "loss": 0.0578,
      "step": 12
    },
    {
      "epoch": 0.009651076466221232,
      "grad_norm": 0.9604913592338562,
      "learning_rate": 0.00017518398074789775,
      "loss": 0.0722,
      "step": 13
    },
    {
      "epoch": 0.010393466963622866,
      "grad_norm": 1.2394442558288574,
      "learning_rate": 0.00017071067811865476,
      "loss": 0.1024,
      "step": 14
    },
    {
      "epoch": 0.011135857461024499,
      "grad_norm": 1.7442325353622437,
      "learning_rate": 0.00016593458151000688,
      "loss": 0.108,
      "step": 15
    },
    {
      "epoch": 0.011878247958426132,
      "grad_norm": 0.8243395090103149,
      "learning_rate": 0.00016087614290087208,
      "loss": 0.069,
      "step": 16
    },
    {
      "epoch": 0.012620638455827766,
      "grad_norm": 0.9156538248062134,
      "learning_rate": 0.00015555702330196023,
      "loss": 0.0934,
      "step": 17
    },
    {
      "epoch": 0.013363028953229399,
      "grad_norm": 0.5281884670257568,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.0152,
      "step": 18
    },
    {
      "epoch": 0.014105419450631032,
      "grad_norm": 1.2212343215942383,
      "learning_rate": 0.00014422886902190014,
      "loss": 0.0413,
      "step": 19
    },
    {
      "epoch": 0.014847809948032665,
      "grad_norm": 0.5214835405349731,
      "learning_rate": 0.000138268343236509,
      "loss": 0.0463,
      "step": 20
    },
    {
      "epoch": 0.015590200445434299,
      "grad_norm": 0.3459300398826599,
      "learning_rate": 0.00013214394653031616,
      "loss": 0.017,
      "step": 21
    },
    {
      "epoch": 0.016332590942835932,
      "grad_norm": 0.5943859815597534,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.0299,
      "step": 22
    },
    {
      "epoch": 0.017074981440237565,
      "grad_norm": 1.0379056930541992,
      "learning_rate": 0.00011950903220161285,
      "loss": 0.0824,
      "step": 23
    },
    {
      "epoch": 0.017817371937639197,
      "grad_norm": 1.2425271272659302,
      "learning_rate": 0.00011305261922200519,
      "loss": 0.1157,
      "step": 24
    },
    {
      "epoch": 0.01855976243504083,
      "grad_norm": 1.3894206285476685,
      "learning_rate": 0.00010654031292301432,
      "loss": 0.0278,
      "step": 25
    },
    {
      "epoch": 0.01855976243504083,
      "eval_loss": 0.16282150149345398,
      "eval_runtime": 198.8339,
      "eval_samples_per_second": 2.857,
      "eval_steps_per_second": 1.428,
      "step": 25
    },
    {
      "epoch": 0.019302152932442463,
      "grad_norm": 0.3026466369628906,
      "learning_rate": 0.0001,
      "loss": 0.007,
      "step": 26
    },
    {
      "epoch": 0.0200445434298441,
      "grad_norm": 0.882306694984436,
      "learning_rate": 9.345968707698569e-05,
      "loss": 0.136,
      "step": 27
    },
    {
      "epoch": 0.020786933927245732,
      "grad_norm": 0.8444817066192627,
      "learning_rate": 8.694738077799488e-05,
      "loss": 0.0609,
      "step": 28
    },
    {
      "epoch": 0.021529324424647365,
      "grad_norm": 1.1410984992980957,
      "learning_rate": 8.049096779838719e-05,
      "loss": 0.0993,
      "step": 29
    },
    {
      "epoch": 0.022271714922048998,
      "grad_norm": 0.5272141098976135,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.0364,
      "step": 30
    },
    {
      "epoch": 0.02301410541945063,
      "grad_norm": 0.3858809769153595,
      "learning_rate": 6.785605346968386e-05,
      "loss": 0.0412,
      "step": 31
    },
    {
      "epoch": 0.023756495916852263,
      "grad_norm": 0.44752252101898193,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.0214,
      "step": 32
    },
    {
      "epoch": 0.024498886414253896,
      "grad_norm": 0.5306777954101562,
      "learning_rate": 5.577113097809989e-05,
      "loss": 0.011,
      "step": 33
    },
    {
      "epoch": 0.025241276911655532,
      "grad_norm": 0.9459553360939026,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.0844,
      "step": 34
    },
    {
      "epoch": 0.025983667409057165,
      "grad_norm": 0.47654223442077637,
      "learning_rate": 4.444297669803981e-05,
      "loss": 0.0523,
      "step": 35
    },
    {
      "epoch": 0.026726057906458798,
      "grad_norm": 0.9003578424453735,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 0.0696,
      "step": 36
    },
    {
      "epoch": 0.02746844840386043,
      "grad_norm": 2.070967674255371,
      "learning_rate": 3.406541848999312e-05,
      "loss": 0.1786,
      "step": 37
    },
    {
      "epoch": 0.028210838901262063,
      "grad_norm": 0.4337747097015381,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.0155,
      "step": 38
    },
    {
      "epoch": 0.028953229398663696,
      "grad_norm": 0.3766095042228699,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 0.0135,
      "step": 39
    },
    {
      "epoch": 0.02969561989606533,
      "grad_norm": 1.6106809377670288,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 0.0379,
      "step": 40
    },
    {
      "epoch": 0.030438010393466965,
      "grad_norm": 2.527824878692627,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 0.042,
      "step": 41
    },
    {
      "epoch": 0.031180400890868598,
      "grad_norm": 2.065561056137085,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.0284,
      "step": 42
    },
    {
      "epoch": 0.03192279138827023,
      "grad_norm": 0.4519674777984619,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 0.0148,
      "step": 43
    },
    {
      "epoch": 0.032665181885671864,
      "grad_norm": 0.6088566184043884,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.0132,
      "step": 44
    },
    {
      "epoch": 0.0334075723830735,
      "grad_norm": 0.1786351203918457,
      "learning_rate": 5.306987050489442e-06,
      "loss": 0.0076,
      "step": 45
    },
    {
      "epoch": 0.03414996288047513,
      "grad_norm": 0.5400381684303284,
      "learning_rate": 3.40741737109318e-06,
      "loss": 0.0073,
      "step": 46
    },
    {
      "epoch": 0.034892353377876766,
      "grad_norm": 62.15085983276367,
      "learning_rate": 1.921471959676957e-06,
      "loss": 1.0234,
      "step": 47
    },
    {
      "epoch": 0.035634743875278395,
      "grad_norm": 23.811513900756836,
      "learning_rate": 8.555138626189618e-07,
      "loss": 0.7888,
      "step": 48
    },
    {
      "epoch": 0.03637713437268003,
      "grad_norm": 27.518611907958984,
      "learning_rate": 2.141076761396521e-07,
      "loss": 0.8569,
      "step": 49
    },
    {
      "epoch": 0.03711952487008166,
      "grad_norm": 50.407073974609375,
      "learning_rate": 0.0,
      "loss": 1.3908,
      "step": 50
    },
    {
      "epoch": 0.03711952487008166,
      "eval_loss": 0.1675003468990326,
      "eval_runtime": 198.8265,
      "eval_samples_per_second": 2.857,
      "eval_steps_per_second": 1.428,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.455967199782502e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}