{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 26868,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05582849486377847,
"grad_norm": 2.144900321960449,
"learning_rate": 4.90695250856037e-07,
"loss": 0.7769,
"step": 500
},
{
"epoch": 0.11165698972755694,
"grad_norm": 2.4554147720336914,
"learning_rate": 4.813905017120738e-07,
"loss": 0.6739,
"step": 1000
},
{
"epoch": 0.16748548459133542,
"grad_norm": 2.2560763359069824,
"learning_rate": 4.7208575256811075e-07,
"loss": 0.6411,
"step": 1500
},
{
"epoch": 0.2233139794551139,
"grad_norm": 1.9884450435638428,
"learning_rate": 4.627810034241477e-07,
"loss": 0.6238,
"step": 2000
},
{
"epoch": 0.2791424743188924,
"grad_norm": 2.335881471633911,
"learning_rate": 4.534762542801846e-07,
"loss": 0.6322,
"step": 2500
},
{
"epoch": 0.33497096918267083,
"grad_norm": 1.7922388315200806,
"learning_rate": 4.4417150513622147e-07,
"loss": 0.6251,
"step": 3000
},
{
"epoch": 0.39079946404644933,
"grad_norm": 2.6557819843292236,
"learning_rate": 4.348667559922584e-07,
"loss": 0.6248,
"step": 3500
},
{
"epoch": 0.4466279589102278,
"grad_norm": 2.993184804916382,
"learning_rate": 4.255620068482954e-07,
"loss": 0.6116,
"step": 4000
},
{
"epoch": 0.5024564537740063,
"grad_norm": 1.821624755859375,
"learning_rate": 4.162572577043323e-07,
"loss": 0.6148,
"step": 4500
},
{
"epoch": 0.5582849486377848,
"grad_norm": 2.4944441318511963,
"learning_rate": 4.0695250856036917e-07,
"loss": 0.6139,
"step": 5000
},
{
"epoch": 0.6141134435015632,
"grad_norm": 1.9961040019989014,
"learning_rate": 3.976477594164061e-07,
"loss": 0.61,
"step": 5500
},
{
"epoch": 0.6699419383653417,
"grad_norm": 1.723148226737976,
"learning_rate": 3.88343010272443e-07,
"loss": 0.5989,
"step": 6000
},
{
"epoch": 0.7257704332291202,
"grad_norm": 2.802048683166504,
"learning_rate": 3.7903826112848e-07,
"loss": 0.5965,
"step": 6500
},
{
"epoch": 0.7815989280928987,
"grad_norm": 1.4735087156295776,
"learning_rate": 3.6973351198451687e-07,
"loss": 0.6026,
"step": 7000
},
{
"epoch": 0.837427422956677,
"grad_norm": 1.827397346496582,
"learning_rate": 3.604287628405538e-07,
"loss": 0.5935,
"step": 7500
},
{
"epoch": 0.8932559178204555,
"grad_norm": 1.5191324949264526,
"learning_rate": 3.511240136965907e-07,
"loss": 0.595,
"step": 8000
},
{
"epoch": 0.949084412684234,
"grad_norm": 2.2336690425872803,
"learning_rate": 3.4181926455262764e-07,
"loss": 0.5942,
"step": 8500
},
{
"epoch": 1.0049129075480125,
"grad_norm": 2.0507538318634033,
"learning_rate": 3.325145154086645e-07,
"loss": 0.5899,
"step": 9000
},
{
"epoch": 1.060741402411791,
"grad_norm": 2.3716676235198975,
"learning_rate": 3.232097662647015e-07,
"loss": 0.5766,
"step": 9500
},
{
"epoch": 1.1165698972755695,
"grad_norm": 2.1963398456573486,
"learning_rate": 3.139050171207384e-07,
"loss": 0.5838,
"step": 10000
},
{
"epoch": 1.172398392139348,
"grad_norm": 2.1941640377044678,
"learning_rate": 3.0460026797677534e-07,
"loss": 0.582,
"step": 10500
},
{
"epoch": 1.2282268870031263,
"grad_norm": 1.9807358980178833,
"learning_rate": 2.952955188328122e-07,
"loss": 0.5879,
"step": 11000
},
{
"epoch": 1.284055381866905,
"grad_norm": 2.4434101581573486,
"learning_rate": 2.8599076968884914e-07,
"loss": 0.5811,
"step": 11500
},
{
"epoch": 1.3398838767306833,
"grad_norm": 1.6561393737792969,
"learning_rate": 2.766860205448861e-07,
"loss": 0.5778,
"step": 12000
},
{
"epoch": 1.395712371594462,
"grad_norm": 1.4104284048080444,
"learning_rate": 2.6738127140092304e-07,
"loss": 0.5752,
"step": 12500
},
{
"epoch": 1.4515408664582403,
"grad_norm": 2.0261754989624023,
"learning_rate": 2.580765222569599e-07,
"loss": 0.5802,
"step": 13000
},
{
"epoch": 1.5073693613220187,
"grad_norm": 2.1292824745178223,
"learning_rate": 2.4877177311299684e-07,
"loss": 0.582,
"step": 13500
},
{
"epoch": 1.563197856185797,
"grad_norm": 2.074819564819336,
"learning_rate": 2.3946702396903376e-07,
"loss": 0.5804,
"step": 14000
},
{
"epoch": 1.6190263510495757,
"grad_norm": 2.1826281547546387,
"learning_rate": 2.301622748250707e-07,
"loss": 0.5771,
"step": 14500
},
{
"epoch": 1.6748548459133543,
"grad_norm": 1.8490856885910034,
"learning_rate": 2.2085752568110764e-07,
"loss": 0.5687,
"step": 15000
},
{
"epoch": 1.7306833407771327,
"grad_norm": 1.8786789178848267,
"learning_rate": 2.1155277653714454e-07,
"loss": 0.5594,
"step": 15500
},
{
"epoch": 1.786511835640911,
"grad_norm": 1.9750012159347534,
"learning_rate": 2.0224802739318146e-07,
"loss": 0.5899,
"step": 16000
},
{
"epoch": 1.8423403305046895,
"grad_norm": 2.6090946197509766,
"learning_rate": 1.9294327824921839e-07,
"loss": 0.577,
"step": 16500
},
{
"epoch": 1.898168825368468,
"grad_norm": 1.756086826324463,
"learning_rate": 1.836385291052553e-07,
"loss": 0.5719,
"step": 17000
},
{
"epoch": 1.9539973202322467,
"grad_norm": 2.12827205657959,
"learning_rate": 1.7433377996129224e-07,
"loss": 0.5763,
"step": 17500
},
{
"epoch": 2.009825815096025,
"grad_norm": 2.411118745803833,
"learning_rate": 1.6502903081732916e-07,
"loss": 0.5702,
"step": 18000
},
{
"epoch": 2.0656543099598035,
"grad_norm": 1.790048599243164,
"learning_rate": 1.5572428167336606e-07,
"loss": 0.5625,
"step": 18500
},
{
"epoch": 2.121482804823582,
"grad_norm": 2.1843719482421875,
"learning_rate": 1.46419532529403e-07,
"loss": 0.567,
"step": 19000
},
{
"epoch": 2.1773112996873603,
"grad_norm": 1.9832565784454346,
"learning_rate": 1.371147833854399e-07,
"loss": 0.5698,
"step": 19500
},
{
"epoch": 2.233139794551139,
"grad_norm": 2.159323215484619,
"learning_rate": 1.2781003424147686e-07,
"loss": 0.577,
"step": 20000
},
{
"epoch": 2.2889682894149175,
"grad_norm": 2.5329527854919434,
"learning_rate": 1.1850528509751377e-07,
"loss": 0.5539,
"step": 20500
},
{
"epoch": 2.344796784278696,
"grad_norm": 1.9504591226577759,
"learning_rate": 1.092005359535507e-07,
"loss": 0.5653,
"step": 21000
},
{
"epoch": 2.4006252791424743,
"grad_norm": 2.2535276412963867,
"learning_rate": 9.989578680958762e-08,
"loss": 0.5585,
"step": 21500
},
{
"epoch": 2.4564537740062526,
"grad_norm": 1.9028602838516235,
"learning_rate": 9.059103766562453e-08,
"loss": 0.5714,
"step": 22000
},
{
"epoch": 2.512282268870031,
"grad_norm": 1.9021795988082886,
"learning_rate": 8.128628852166146e-08,
"loss": 0.5601,
"step": 22500
},
{
"epoch": 2.56811076373381,
"grad_norm": 1.945610523223877,
"learning_rate": 7.198153937769838e-08,
"loss": 0.5692,
"step": 23000
},
{
"epoch": 2.6239392585975883,
"grad_norm": 1.634543776512146,
"learning_rate": 6.26767902337353e-08,
"loss": 0.5796,
"step": 23500
},
{
"epoch": 2.6797677534613666,
"grad_norm": 2.275546073913574,
"learning_rate": 5.337204108977222e-08,
"loss": 0.5619,
"step": 24000
},
{
"epoch": 2.735596248325145,
"grad_norm": 1.9358148574829102,
"learning_rate": 4.406729194580914e-08,
"loss": 0.5496,
"step": 24500
},
{
"epoch": 2.791424743188924,
"grad_norm": 2.630359411239624,
"learning_rate": 3.476254280184606e-08,
"loss": 0.5688,
"step": 25000
},
{
"epoch": 2.8472532380527023,
"grad_norm": 1.6946734189987183,
"learning_rate": 2.545779365788298e-08,
"loss": 0.5694,
"step": 25500
},
{
"epoch": 2.9030817329164806,
"grad_norm": 1.3165154457092285,
"learning_rate": 1.6153044513919903e-08,
"loss": 0.5681,
"step": 26000
},
{
"epoch": 2.958910227780259,
"grad_norm": 1.509293794631958,
"learning_rate": 6.848295369956826e-09,
"loss": 0.5635,
"step": 26500
}
],
"logging_steps": 500,
"max_steps": 26868,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.2851483810883174e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}