|
{
  "best_metric": 0.568750262260437,
  "best_model_checkpoint": "./lora-alpaca/checkpoint-400",
  "epoch": 0.6683375104427736,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 2.7003,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 2.566,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 8.999999999999999e-05,
      "loss": 2.2648,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00011099999999999999,
      "loss": 1.657,
      "step": 40
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00014099999999999998,
      "loss": 1.1599,
      "step": 50
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00017099999999999998,
      "loss": 0.9037,
      "step": 60
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.000201,
      "loss": 0.8137,
      "step": 70
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00023099999999999998,
      "loss": 0.7827,
      "step": 80
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.000261,
      "loss": 0.7554,
      "step": 90
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00029099999999999997,
      "loss": 0.7357,
      "step": 100
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0002957831325301205,
      "loss": 0.6893,
      "step": 110
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00028975903614457827,
      "loss": 0.6606,
      "step": 120
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002837349397590361,
      "loss": 0.6506,
      "step": 130
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00027771084337349395,
      "loss": 0.6462,
      "step": 140
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0002716867469879518,
      "loss": 0.6315,
      "step": 150
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0002656626506024096,
      "loss": 0.6337,
      "step": 160
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00025963855421686746,
      "loss": 0.6223,
      "step": 170
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00025361445783132525,
      "loss": 0.6136,
      "step": 180
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00024759036144578314,
      "loss": 0.6198,
      "step": 190
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00024156626506024095,
      "loss": 0.6084,
      "step": 200
    },
    {
      "epoch": 0.33,
      "eval_loss": 0.608456552028656,
      "eval_runtime": 123.856,
      "eval_samples_per_second": 16.148,
      "eval_steps_per_second": 1.009,
      "step": 200
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00023554216867469876,
      "loss": 0.6021,
      "step": 210
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0002295180722891566,
      "loss": 0.5949,
      "step": 220
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00022349397590361444,
      "loss": 0.5972,
      "step": 230
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00021746987951807228,
      "loss": 0.5922,
      "step": 240
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0002114457831325301,
      "loss": 0.5876,
      "step": 250
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00020542168674698793,
      "loss": 0.5788,
      "step": 260
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0001993975903614458,
      "loss": 0.5894,
      "step": 270
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.0001933734939759036,
      "loss": 0.5877,
      "step": 280
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00018734939759036142,
      "loss": 0.5835,
      "step": 290
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00018132530120481925,
      "loss": 0.5791,
      "step": 300
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00017530120481927712,
      "loss": 0.5841,
      "step": 310
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00016927710843373493,
      "loss": 0.5728,
      "step": 320
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00016325301204819274,
      "loss": 0.569,
      "step": 330
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00015722891566265058,
      "loss": 0.5709,
      "step": 340
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00015120481927710845,
      "loss": 0.5762,
      "step": 350
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00014518072289156626,
      "loss": 0.5704,
      "step": 360
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001391566265060241,
      "loss": 0.5661,
      "step": 370
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00013313253012048193,
      "loss": 0.5662,
      "step": 380
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00012710843373493975,
      "loss": 0.5674,
      "step": 390
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00012108433734939758,
      "loss": 0.5635,
      "step": 400
    },
    {
      "epoch": 0.67,
      "eval_loss": 0.568750262260437,
      "eval_runtime": 122.9061,
      "eval_samples_per_second": 16.273,
      "eval_steps_per_second": 1.017,
      "step": 400
    }
  ],
  "max_steps": 598,
  "num_train_epochs": 1,
  "total_flos": 3.4431112456647475e+17,
  "trial_name": null,
  "trial_params": null
}
|
|