{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.3515948819607138,
  "eval_steps": 500,
  "global_step": 15000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04505316273202379,
      "grad_norm": 2220.93115234375,
      "learning_rate": 1.25e-05,
      "loss": 517.9313,
      "step": 500
    },
    {
      "epoch": 0.09010632546404758,
      "grad_norm": 3532.487060546875,
      "learning_rate": 2.5e-05,
      "loss": 447.0173,
      "step": 1000
    },
    {
      "epoch": 0.13515948819607138,
      "grad_norm": 3076.73876953125,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 403.1765,
      "step": 1500
    },
    {
      "epoch": 0.18021265092809516,
      "grad_norm": 2786.041015625,
      "learning_rate": 5e-05,
      "loss": 371.2528,
      "step": 2000
    },
    {
      "epoch": 0.22526581366011894,
      "grad_norm": 2246.498046875,
      "learning_rate": 4.990486745229364e-05,
      "loss": 383.6465,
      "step": 2500
    },
    {
      "epoch": 0.27031897639214275,
      "grad_norm": 0.0,
      "learning_rate": 4.962019382530521e-05,
      "loss": 368.1067,
      "step": 3000
    },
    {
      "epoch": 0.3153721391241665,
      "grad_norm": 3346.040771484375,
      "learning_rate": 4.914814565722671e-05,
      "loss": 354.079,
      "step": 3500
    },
    {
      "epoch": 0.3604253018561903,
      "grad_norm": 3417.40185546875,
      "learning_rate": 4.849231551964771e-05,
      "loss": 351.5845,
      "step": 4000
    },
    {
      "epoch": 0.40547846458821407,
      "grad_norm": 2524.94970703125,
      "learning_rate": 4.765769467591625e-05,
      "loss": 343.0565,
      "step": 4500
    },
    {
      "epoch": 0.4505316273202379,
      "grad_norm": 2467.03076171875,
      "learning_rate": 4.665063509461097e-05,
      "loss": 325.0454,
      "step": 5000
    },
    {
      "epoch": 0.4955847900522617,
      "grad_norm": 2483.644287109375,
      "learning_rate": 4.54788011072248e-05,
      "loss": 332.3088,
      "step": 5500
    },
    {
      "epoch": 0.5406379527842855,
      "grad_norm": 3229.221923828125,
      "learning_rate": 4.415111107797445e-05,
      "loss": 329.884,
      "step": 6000
    },
    {
      "epoch": 0.5856911155163093,
      "grad_norm": 2195.654541015625,
      "learning_rate": 4.267766952966369e-05,
      "loss": 334.0953,
      "step": 6500
    },
    {
      "epoch": 0.630744278248333,
      "grad_norm": 2431.37744140625,
      "learning_rate": 4.1069690242163484e-05,
      "loss": 314.5535,
      "step": 7000
    },
    {
      "epoch": 0.6757974409803568,
      "grad_norm": 2344.476318359375,
      "learning_rate": 3.933941090877615e-05,
      "loss": 322.5146,
      "step": 7500
    },
    {
      "epoch": 0.7208506037123806,
      "grad_norm": 2241.12255859375,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 314.8933,
      "step": 8000
    },
    {
      "epoch": 0.7659037664444044,
      "grad_norm": 0.0,
      "learning_rate": 3.556545654351749e-05,
      "loss": 311.8407,
      "step": 8500
    },
    {
      "epoch": 0.8109569291764281,
      "grad_norm": 2344.2255859375,
      "learning_rate": 3.355050358314172e-05,
      "loss": 303.5129,
      "step": 9000
    },
    {
      "epoch": 0.856010091908452,
      "grad_norm": 2038.534912109375,
      "learning_rate": 3.147047612756302e-05,
      "loss": 303.3366,
      "step": 9500
    },
    {
      "epoch": 0.9010632546404758,
      "grad_norm": 1972.404296875,
      "learning_rate": 2.9341204441673266e-05,
      "loss": 310.6575,
      "step": 10000
    },
    {
      "epoch": 0.9461164173724995,
      "grad_norm": 0.0,
      "learning_rate": 2.717889356869146e-05,
      "loss": 291.44,
      "step": 10500
    },
    {
      "epoch": 0.9911695801045234,
      "grad_norm": 3448.444091796875,
      "learning_rate": 2.5e-05,
      "loss": 303.3335,
      "step": 11000
    },
    {
      "epoch": 1.0,
      "eval_loss": 342.4247741699219,
      "eval_runtime": 247.7832,
      "eval_samples_per_second": 39.813,
      "eval_steps_per_second": 4.98,
      "step": 11098
    },
    {
      "epoch": 1.0362227428365471,
      "grad_norm": 4117.76806640625,
      "learning_rate": 2.2821106431308544e-05,
      "loss": 283.8551,
      "step": 11500
    },
    {
      "epoch": 1.081275905568571,
      "grad_norm": 2747.44287109375,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 268.1614,
      "step": 12000
    },
    {
      "epoch": 1.1263290683005946,
      "grad_norm": 2435.322265625,
      "learning_rate": 1.852952387243698e-05,
      "loss": 270.2856,
      "step": 12500
    },
    {
      "epoch": 1.1713822310326185,
      "grad_norm": 3295.06005859375,
      "learning_rate": 1.6449496416858284e-05,
      "loss": 258.1864,
      "step": 13000
    },
    {
      "epoch": 1.2164353937646424,
      "grad_norm": 4224.72412109375,
      "learning_rate": 1.443454345648252e-05,
      "loss": 264.1837,
      "step": 13500
    },
    {
      "epoch": 1.261488556496666,
      "grad_norm": 2986.144775390625,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 261.3931,
      "step": 14000
    },
    {
      "epoch": 1.30654171922869,
      "grad_norm": 2386.795166015625,
      "learning_rate": 1.0660589091223855e-05,
      "loss": 264.0925,
      "step": 14500
    },
    {
      "epoch": 1.3515948819607138,
      "grad_norm": 2897.310791015625,
      "learning_rate": 8.930309757836517e-06,
      "loss": 263.9691,
      "step": 15000
    }
  ],
  "logging_steps": 500,
  "max_steps": 20000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}