{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.1363636363636362,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011363636363636364,
      "eval_loss": 3.371885061264038,
      "eval_runtime": 16.7352,
      "eval_samples_per_second": 8.844,
      "eval_steps_per_second": 1.135,
      "step": 1
    },
    {
      "epoch": 0.03409090909090909,
      "grad_norm": 0.9526873826980591,
      "learning_rate": 1.5e-05,
      "loss": 3.4877,
      "step": 3
    },
    {
      "epoch": 0.06818181818181818,
      "grad_norm": 0.9885739088058472,
      "learning_rate": 3e-05,
      "loss": 3.3668,
      "step": 6
    },
    {
      "epoch": 0.10227272727272728,
      "grad_norm": 1.137595772743225,
      "learning_rate": 4.5e-05,
      "loss": 3.5998,
      "step": 9
    },
    {
      "epoch": 0.10227272727272728,
      "eval_loss": 3.313127279281616,
      "eval_runtime": 17.0437,
      "eval_samples_per_second": 8.684,
      "eval_steps_per_second": 1.115,
      "step": 9
    },
    {
      "epoch": 0.13636363636363635,
      "grad_norm": 1.3410749435424805,
      "learning_rate": 4.993910125649561e-05,
      "loss": 3.3443,
      "step": 12
    },
    {
      "epoch": 0.17045454545454544,
      "grad_norm": 1.3125852346420288,
      "learning_rate": 4.962019382530521e-05,
      "loss": 3.127,
      "step": 15
    },
    {
      "epoch": 0.20454545454545456,
      "grad_norm": 1.3217312097549438,
      "learning_rate": 4.9031542398457974e-05,
      "loss": 3.0135,
      "step": 18
    },
    {
      "epoch": 0.20454545454545456,
      "eval_loss": 2.8074119091033936,
      "eval_runtime": 17.0586,
      "eval_samples_per_second": 8.676,
      "eval_steps_per_second": 1.114,
      "step": 18
    },
    {
      "epoch": 0.23863636363636365,
      "grad_norm": 1.6107302904129028,
      "learning_rate": 4.817959636416969e-05,
      "loss": 2.6472,
      "step": 21
    },
    {
      "epoch": 0.2727272727272727,
      "grad_norm": 1.6939775943756104,
      "learning_rate": 4.707368982147318e-05,
      "loss": 2.6063,
      "step": 24
    },
    {
      "epoch": 0.3068181818181818,
      "grad_norm": 1.569953203201294,
      "learning_rate": 4.572593931387604e-05,
      "loss": 2.5102,
      "step": 27
    },
    {
      "epoch": 0.3068181818181818,
      "eval_loss": 2.223207473754883,
      "eval_runtime": 17.0516,
      "eval_samples_per_second": 8.68,
      "eval_steps_per_second": 1.114,
      "step": 27
    },
    {
      "epoch": 0.3409090909090909,
      "grad_norm": 1.427184820175171,
      "learning_rate": 4.415111107797445e-05,
      "loss": 2.1669,
      "step": 30
    },
    {
      "epoch": 0.375,
      "grad_norm": 1.5676186084747314,
      "learning_rate": 4.2366459261474933e-05,
      "loss": 2.2602,
      "step": 33
    },
    {
      "epoch": 0.4090909090909091,
      "grad_norm": 1.4393879175186157,
      "learning_rate": 4.039153688314145e-05,
      "loss": 2.3522,
      "step": 36
    },
    {
      "epoch": 0.4090909090909091,
      "eval_loss": 2.0838232040405273,
      "eval_runtime": 17.0641,
      "eval_samples_per_second": 8.673,
      "eval_steps_per_second": 1.113,
      "step": 36
    },
    {
      "epoch": 0.4431818181818182,
      "grad_norm": 1.435742974281311,
      "learning_rate": 3.824798160583012e-05,
      "loss": 2.0674,
      "step": 39
    },
    {
      "epoch": 0.4772727272727273,
      "grad_norm": 1.354801893234253,
      "learning_rate": 3.5959278669726935e-05,
      "loss": 2.2818,
      "step": 42
    },
    {
      "epoch": 0.5113636363636364,
      "grad_norm": 1.3888118267059326,
      "learning_rate": 3.355050358314172e-05,
      "loss": 2.0912,
      "step": 45
    },
    {
      "epoch": 0.5113636363636364,
      "eval_loss": 2.0078811645507812,
      "eval_runtime": 17.0471,
      "eval_samples_per_second": 8.682,
      "eval_steps_per_second": 1.115,
      "step": 45
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 1.4527229070663452,
      "learning_rate": 3.104804738999169e-05,
      "loss": 1.8334,
      "step": 48
    },
    {
      "epoch": 0.5795454545454546,
      "grad_norm": 1.287550449371338,
      "learning_rate": 2.8479327524001636e-05,
      "loss": 2.0519,
      "step": 51
    },
    {
      "epoch": 0.6136363636363636,
      "grad_norm": 1.421475887298584,
      "learning_rate": 2.587248741756253e-05,
      "loss": 2.0602,
      "step": 54
    },
    {
      "epoch": 0.6136363636363636,
      "eval_loss": 1.96761155128479,
      "eval_runtime": 17.0648,
      "eval_samples_per_second": 8.673,
      "eval_steps_per_second": 1.113,
      "step": 54
    },
    {
      "epoch": 0.6477272727272727,
      "grad_norm": 1.5127720832824707,
      "learning_rate": 2.3256088156396868e-05,
      "loss": 2.1334,
      "step": 57
    },
    {
      "epoch": 0.6818181818181818,
      "grad_norm": 1.5170916318893433,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 2.007,
      "step": 60
    },
    {
      "epoch": 0.7159090909090909,
      "grad_norm": 1.511754035949707,
      "learning_rate": 1.8109066104575023e-05,
      "loss": 2.039,
      "step": 63
    },
    {
      "epoch": 0.7159090909090909,
      "eval_loss": 1.945967435836792,
      "eval_runtime": 17.0402,
      "eval_samples_per_second": 8.685,
      "eval_steps_per_second": 1.115,
      "step": 63
    },
    {
      "epoch": 0.75,
      "grad_norm": 1.5611883401870728,
      "learning_rate": 1.56348351646022e-05,
      "loss": 2.1365,
      "step": 66
    },
    {
      "epoch": 0.7840909090909091,
      "grad_norm": 1.4377385377883911,
      "learning_rate": 1.3263210930352737e-05,
      "loss": 2.111,
      "step": 69
    },
    {
      "epoch": 0.8181818181818182,
      "grad_norm": 1.4909229278564453,
      "learning_rate": 1.1020177413231334e-05,
      "loss": 2.0401,
      "step": 72
    },
    {
      "epoch": 0.8181818181818182,
      "eval_loss": 1.9314323663711548,
      "eval_runtime": 17.0455,
      "eval_samples_per_second": 8.683,
      "eval_steps_per_second": 1.115,
      "step": 72
    },
    {
      "epoch": 0.8522727272727273,
      "grad_norm": 1.535409688949585,
      "learning_rate": 8.930309757836517e-06,
      "loss": 2.0856,
      "step": 75
    },
    {
      "epoch": 0.8863636363636364,
      "grad_norm": 1.479998230934143,
      "learning_rate": 7.016504991533726e-06,
      "loss": 1.9215,
      "step": 78
    },
    {
      "epoch": 0.9204545454545454,
      "grad_norm": 1.4978396892547607,
      "learning_rate": 5.299731159831953e-06,
      "loss": 2.0011,
      "step": 81
    },
    {
      "epoch": 0.9204545454545454,
      "eval_loss": 1.9259672164916992,
      "eval_runtime": 17.0378,
      "eval_samples_per_second": 8.687,
      "eval_steps_per_second": 1.115,
      "step": 81
    },
    {
      "epoch": 0.9545454545454546,
      "grad_norm": 1.4255573749542236,
      "learning_rate": 3.798797596089351e-06,
      "loss": 2.1283,
      "step": 84
    },
    {
      "epoch": 0.9886363636363636,
      "grad_norm": 1.549778938293457,
      "learning_rate": 2.5301488425208296e-06,
      "loss": 1.9712,
      "step": 87
    },
    {
      "epoch": 1.0227272727272727,
      "grad_norm": 1.6622482538223267,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 1.9208,
      "step": 90
    },
    {
      "epoch": 1.0227272727272727,
      "eval_loss": 1.922031283378601,
      "eval_runtime": 17.0501,
      "eval_samples_per_second": 8.68,
      "eval_steps_per_second": 1.114,
      "step": 90
    },
    {
      "epoch": 1.0568181818181819,
      "grad_norm": 1.373210072517395,
      "learning_rate": 7.426068431000882e-07,
      "loss": 1.8497,
      "step": 93
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 1.4251350164413452,
      "learning_rate": 2.4329828146074095e-07,
      "loss": 1.8534,
      "step": 96
    },
    {
      "epoch": 1.125,
      "grad_norm": 1.5110843181610107,
      "learning_rate": 1.522932452260595e-08,
      "loss": 1.7715,
      "step": 99
    },
    {
      "epoch": 1.125,
      "eval_loss": 1.9205644130706787,
      "eval_runtime": 17.0633,
      "eval_samples_per_second": 8.674,
      "eval_steps_per_second": 1.114,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.379352952411259e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}