{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.289308176100629,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5806451612903226,
      "eval_loss": 0.19493919610977173,
      "eval_runtime": 1.9862,
      "eval_samples_per_second": 1560.769,
      "eval_steps_per_second": 32.726,
      "step": 318
    },
    {
      "epoch": 1.57,
      "grad_norm": 0.613143265247345,
      "learning_rate": 1.550763701707098e-05,
      "loss": 0.312,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8112903225806452,
      "eval_loss": 0.0981183648109436,
      "eval_runtime": 2.3793,
      "eval_samples_per_second": 1302.89,
      "eval_steps_per_second": 27.319,
      "step": 636
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.87,
      "eval_loss": 0.06753243505954742,
      "eval_runtime": 3.757,
      "eval_samples_per_second": 825.133,
      "eval_steps_per_second": 17.301,
      "step": 954
    },
    {
      "epoch": 3.14,
      "grad_norm": 0.5029990077018738,
      "learning_rate": 1.101527403414196e-05,
      "loss": 0.1131,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8935483870967742,
      "eval_loss": 0.0535094179213047,
      "eval_runtime": 2.65,
      "eval_samples_per_second": 1169.792,
      "eval_steps_per_second": 24.528,
      "step": 1272
    },
    {
      "epoch": 4.72,
      "grad_norm": 0.37056881189346313,
      "learning_rate": 6.522911051212939e-06,
      "loss": 0.0753,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.896774193548387,
      "eval_loss": 0.04588622599840164,
      "eval_runtime": 2.2713,
      "eval_samples_per_second": 1364.869,
      "eval_steps_per_second": 28.618,
      "step": 1590
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9048387096774193,
      "eval_loss": 0.04209807887673378,
      "eval_runtime": 4.2011,
      "eval_samples_per_second": 737.9,
      "eval_steps_per_second": 15.472,
      "step": 1908
    },
    {
      "epoch": 6.29,
      "grad_norm": 0.3099181354045868,
      "learning_rate": 2.0305480682839176e-06,
      "loss": 0.0622,
      "step": 2000
    }
  ],
  "logging_steps": 500,
  "max_steps": 2226,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "total_flos": 520991326672152.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.5340221221277321,
    "num_train_epochs": 7,
    "temperature": 16
  }
}