{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9979315099977017,
  "eval_steps": 500,
  "global_step": 3261,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09193288899103655,
      "grad_norm": 0.15536943078041077,
      "learning_rate": 0.00019988036932954271,
      "loss": 0.8236,
      "step": 100
    },
    {
      "epoch": 0.1838657779820731,
      "grad_norm": 0.16779352724552155,
      "learning_rate": 0.00019892504066072438,
      "loss": 0.6327,
      "step": 200
    },
    {
      "epoch": 0.2757986669731096,
      "grad_norm": 0.21519789099693298,
      "learning_rate": 0.0001970235207829469,
      "loss": 0.5599,
      "step": 300
    },
    {
      "epoch": 0.3677315559641462,
      "grad_norm": 0.1756363958120346,
      "learning_rate": 0.0001941939972186009,
      "loss": 0.5278,
      "step": 400
    },
    {
      "epoch": 0.4596644449551827,
      "grad_norm": 0.19388951361179352,
      "learning_rate": 0.00019046353359388504,
      "loss": 0.4978,
      "step": 500
    },
    {
      "epoch": 0.5515973339462192,
      "grad_norm": 0.2049034982919693,
      "learning_rate": 0.00018586781078255458,
      "loss": 0.4743,
      "step": 600
    },
    {
      "epoch": 0.6435302229372558,
      "grad_norm": 0.1463390439748764,
      "learning_rate": 0.00018045078562803203,
      "loss": 0.4456,
      "step": 700
    },
    {
      "epoch": 0.7354631119282924,
      "grad_norm": 0.19917741417884827,
      "learning_rate": 0.0001742642705081106,
      "loss": 0.4407,
      "step": 800
    },
    {
      "epoch": 0.8273960009193289,
      "grad_norm": 0.19318071007728577,
      "learning_rate": 0.00016736743776359978,
      "loss": 0.421,
      "step": 900
    },
    {
      "epoch": 0.9193288899103654,
      "grad_norm": 0.1719323843717575,
      "learning_rate": 0.00015982625373091875,
      "loss": 0.409,
      "step": 1000
    },
    {
      "epoch": 1.011261778901402,
      "grad_norm": 0.2220071256160736,
      "learning_rate": 0.00015171284779196334,
      "loss": 0.3985,
      "step": 1100
    },
    {
      "epoch": 1.1031946678924385,
      "grad_norm": 0.14137883484363556,
      "learning_rate": 0.00014310482247611208,
      "loss": 0.3805,
      "step": 1200
    },
    {
      "epoch": 1.1951275568834752,
      "grad_norm": 0.14456775784492493,
      "learning_rate": 0.00013408451121306046,
      "loss": 0.3747,
      "step": 1300
    },
    {
      "epoch": 1.2870604458745116,
      "grad_norm": 0.1589755117893219,
      "learning_rate": 0.0001247381908358749,
      "loss": 0.3716,
      "step": 1400
    },
    {
      "epoch": 1.378993334865548,
      "grad_norm": 0.20999275147914886,
      "learning_rate": 0.00011515525636646231,
      "loss": 0.3618,
      "step": 1500
    },
    {
      "epoch": 1.4709262238565848,
      "grad_norm": 0.15207038819789886,
      "learning_rate": 0.00010542736597640826,
      "loss": 0.3595,
      "step": 1600
    },
    {
      "epoch": 1.5628591128476212,
      "grad_norm": 0.13484518229961395,
      "learning_rate": 9.564756430140164e-05,
      "loss": 0.3581,
      "step": 1700
    },
    {
      "epoch": 1.6547920018386577,
      "grad_norm": 0.1453435719013214,
      "learning_rate": 8.590939249450595e-05,
      "loss": 0.3561,
      "step": 1800
    },
    {
      "epoch": 1.7467248908296944,
      "grad_norm": 0.16422341763973236,
      "learning_rate": 7.630599353037633e-05,
      "loss": 0.3564,
      "step": 1900
    },
    {
      "epoch": 1.8386577798207309,
      "grad_norm": 0.13742196559906006,
      "learning_rate": 6.692922131794517e-05,
      "loss": 0.353,
      "step": 2000
    },
    {
      "epoch": 1.9305906688117673,
      "grad_norm": 0.1561811864376068,
      "learning_rate": 5.7868762142672204e-05,
      "loss": 0.3531,
      "step": 2100
    },
    {
      "epoch": 2.022523557802804,
      "grad_norm": 0.13941729068756104,
      "learning_rate": 4.9211276841525744e-05,
      "loss": 0.3438,
      "step": 2200
    },
    {
      "epoch": 2.1144564467938407,
      "grad_norm": 0.13483913242816925,
      "learning_rate": 4.103957191555876e-05,
      "loss": 0.339,
      "step": 2300
    },
    {
      "epoch": 2.206389335784877,
      "grad_norm": 0.16717399656772614,
      "learning_rate": 3.343180750816377e-05,
      "loss": 0.3386,
      "step": 2400
    },
    {
      "epoch": 2.2983222247759136,
      "grad_norm": 0.14586015045642853,
      "learning_rate": 2.6460749824479912e-05,
      "loss": 0.3313,
      "step": 2500
    },
    {
      "epoch": 2.3902551137669503,
      "grad_norm": 0.14884348213672638,
      "learning_rate": 2.019307514235741e-05,
      "loss": 0.3344,
      "step": 2600
    },
    {
      "epoch": 2.4821880027579866,
      "grad_norm": 0.15428389608860016,
      "learning_rate": 1.4688732071827094e-05,
      "loss": 0.3321,
      "step": 2700
    },
    {
      "epoch": 2.5741208917490233,
      "grad_norm": 0.1362064927816391,
      "learning_rate": 1.0000368162888795e-05,
      "loss": 0.3364,
      "step": 2800
    },
    {
      "epoch": 2.66605378074006,
      "grad_norm": 0.1446692794561386,
      "learning_rate": 6.1728263459614796e-06,
      "loss": 0.3364,
      "step": 2900
    },
    {
      "epoch": 2.757986669731096,
      "grad_norm": 0.14172622561454773,
      "learning_rate": 3.2427160214043793e-06,
      "loss": 0.3358,
      "step": 3000
    },
    {
      "epoch": 2.849919558722133,
      "grad_norm": 0.14800885319709778,
      "learning_rate": 1.2380629005222855e-06,
      "loss": 0.3341,
      "step": 3100
    },
    {
      "epoch": 2.9418524477131696,
      "grad_norm": 0.1342545449733734,
      "learning_rate": 1.7804094723098408e-07,
      "loss": 0.3333,
      "step": 3200
    }
  ],
  "logging_steps": 100,
  "max_steps": 3261,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.206758990180516e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}