{
  "best_metric": 0.0413714237511158,
  "best_model_checkpoint": "./Birds-Or-Not/checkpoint-225",
  "epoch": 5.813953488372093,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23,
      "learning_rate": 0.0001922480620155039,
      "loss": 0.512,
      "step": 10
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00018449612403100774,
      "loss": 0.1901,
      "step": 20
    },
    {
      "epoch": 0.58,
      "eval_accuracy": 0.9685863874345549,
      "eval_loss": 0.10355287045240402,
      "eval_runtime": 3.6181,
      "eval_samples_per_second": 52.791,
      "eval_steps_per_second": 6.633,
      "step": 25
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00017674418604651164,
      "loss": 0.3262,
      "step": 30
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.0001689922480620155,
      "loss": 0.1114,
      "step": 40
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00016124031007751937,
      "loss": 0.0367,
      "step": 50
    },
    {
      "epoch": 1.16,
      "eval_accuracy": 0.9738219895287958,
      "eval_loss": 0.06718893349170685,
      "eval_runtime": 3.0958,
      "eval_samples_per_second": 61.697,
      "eval_steps_per_second": 7.753,
      "step": 50
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00015348837209302327,
      "loss": 0.0738,
      "step": 60
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.00014573643410852714,
      "loss": 0.0101,
      "step": 70
    },
    {
      "epoch": 1.74,
      "eval_accuracy": 0.9790575916230366,
      "eval_loss": 0.060171037912368774,
      "eval_runtime": 3.0912,
      "eval_samples_per_second": 61.789,
      "eval_steps_per_second": 7.764,
      "step": 75
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.000137984496124031,
      "loss": 0.0982,
      "step": 80
    },
    {
      "epoch": 2.09,
      "learning_rate": 0.0001302325581395349,
      "loss": 0.0409,
      "step": 90
    },
    {
      "epoch": 2.33,
      "learning_rate": 0.00012248062015503876,
      "loss": 0.0079,
      "step": 100
    },
    {
      "epoch": 2.33,
      "eval_accuracy": 0.9790575916230366,
      "eval_loss": 0.06641152501106262,
      "eval_runtime": 3.0752,
      "eval_samples_per_second": 62.11,
      "eval_steps_per_second": 7.804,
      "step": 100
    },
    {
      "epoch": 2.56,
      "learning_rate": 0.00011472868217054265,
      "loss": 0.0075,
      "step": 110
    },
    {
      "epoch": 2.79,
      "learning_rate": 0.00010697674418604651,
      "loss": 0.0295,
      "step": 120
    },
    {
      "epoch": 2.91,
      "eval_accuracy": 0.9685863874345549,
      "eval_loss": 0.09663180261850357,
      "eval_runtime": 3.2748,
      "eval_samples_per_second": 58.325,
      "eval_steps_per_second": 7.329,
      "step": 125
    },
    {
      "epoch": 3.02,
      "learning_rate": 9.92248062015504e-05,
      "loss": 0.0301,
      "step": 130
    },
    {
      "epoch": 3.26,
      "learning_rate": 9.147286821705426e-05,
      "loss": 0.0095,
      "step": 140
    },
    {
      "epoch": 3.49,
      "learning_rate": 8.372093023255814e-05,
      "loss": 0.0066,
      "step": 150
    },
    {
      "epoch": 3.49,
      "eval_accuracy": 0.9790575916230366,
      "eval_loss": 0.04968209192156792,
      "eval_runtime": 3.1516,
      "eval_samples_per_second": 60.603,
      "eval_steps_per_second": 7.615,
      "step": 150
    },
    {
      "epoch": 3.72,
      "learning_rate": 7.596899224806202e-05,
      "loss": 0.0037,
      "step": 160
    },
    {
      "epoch": 3.95,
      "learning_rate": 6.821705426356589e-05,
      "loss": 0.0036,
      "step": 170
    },
    {
      "epoch": 4.07,
      "eval_accuracy": 0.9842931937172775,
      "eval_loss": 0.06203766167163849,
      "eval_runtime": 3.6519,
      "eval_samples_per_second": 52.302,
      "eval_steps_per_second": 6.572,
      "step": 175
    },
    {
      "epoch": 4.19,
      "learning_rate": 6.0465116279069765e-05,
      "loss": 0.0034,
      "step": 180
    },
    {
      "epoch": 4.42,
      "learning_rate": 5.271317829457365e-05,
      "loss": 0.0033,
      "step": 190
    },
    {
      "epoch": 4.65,
      "learning_rate": 4.496124031007753e-05,
      "loss": 0.0063,
      "step": 200
    },
    {
      "epoch": 4.65,
      "eval_accuracy": 0.9842931937172775,
      "eval_loss": 0.0426977165043354,
      "eval_runtime": 3.2933,
      "eval_samples_per_second": 57.997,
      "eval_steps_per_second": 7.288,
      "step": 200
    },
    {
      "epoch": 4.88,
      "learning_rate": 3.7209302325581394e-05,
      "loss": 0.003,
      "step": 210
    },
    {
      "epoch": 5.12,
      "learning_rate": 2.9457364341085275e-05,
      "loss": 0.0029,
      "step": 220
    },
    {
      "epoch": 5.23,
      "eval_accuracy": 0.9842931937172775,
      "eval_loss": 0.0413714237511158,
      "eval_runtime": 3.0838,
      "eval_samples_per_second": 61.937,
      "eval_steps_per_second": 7.783,
      "step": 225
    },
    {
      "epoch": 5.35,
      "learning_rate": 2.170542635658915e-05,
      "loss": 0.0029,
      "step": 230
    },
    {
      "epoch": 5.58,
      "learning_rate": 1.3953488372093024e-05,
      "loss": 0.0028,
      "step": 240
    },
    {
      "epoch": 5.81,
      "learning_rate": 6.2015503875969e-06,
      "loss": 0.0028,
      "step": 250
    },
    {
      "epoch": 5.81,
      "eval_accuracy": 0.9842931937172775,
      "eval_loss": 0.041494447737932205,
      "eval_runtime": 3.5969,
      "eval_samples_per_second": 53.102,
      "eval_steps_per_second": 6.672,
      "step": 250
    }
  ],
  "max_steps": 258,
  "num_train_epochs": 6,
  "total_flos": 3.09967958458368e+17,
  "trial_name": null,
  "trial_params": null
}
|