{
"best_metric": 0.5168846845626831,
"best_model_checkpoint": "mushrooms_image_detection/checkpoint-19350",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 19350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.26,
"learning_rate": 9.766839378238344e-06,
"loss": 4.0231,
"step": 500
},
{
"epoch": 0.52,
"learning_rate": 9.50777202072539e-06,
"loss": 3.1763,
"step": 1000
},
{
"epoch": 0.78,
"learning_rate": 9.248704663212435e-06,
"loss": 2.7159,
"step": 1500
},
{
"epoch": 1.0,
"eval_accuracy": 0.6579440460037475,
"eval_loss": 2.335684061050415,
"eval_runtime": 255.9186,
"eval_samples_per_second": 60.476,
"eval_steps_per_second": 7.561,
"step": 1935
},
{
"epoch": 1.03,
"learning_rate": 8.989637305699482e-06,
"loss": 2.3776,
"step": 2000
},
{
"epoch": 1.29,
"learning_rate": 8.730569948186529e-06,
"loss": 2.0857,
"step": 2500
},
{
"epoch": 1.55,
"learning_rate": 8.471502590673576e-06,
"loss": 1.8722,
"step": 3000
},
{
"epoch": 1.81,
"learning_rate": 8.212435233160623e-06,
"loss": 1.6815,
"step": 3500
},
{
"epoch": 2.0,
"eval_accuracy": 0.7631323899980617,
"eval_loss": 1.5546201467514038,
"eval_runtime": 273.4727,
"eval_samples_per_second": 56.594,
"eval_steps_per_second": 7.076,
"step": 3870
},
{
"epoch": 2.07,
"learning_rate": 7.953367875647669e-06,
"loss": 1.5331,
"step": 4000
},
{
"epoch": 2.33,
"learning_rate": 7.694300518134716e-06,
"loss": 1.3831,
"step": 4500
},
{
"epoch": 2.58,
"learning_rate": 7.435233160621762e-06,
"loss": 1.2778,
"step": 5000
},
{
"epoch": 2.84,
"learning_rate": 7.176165803108809e-06,
"loss": 1.1659,
"step": 5500
},
{
"epoch": 3.0,
"eval_accuracy": 0.8074562253666732,
"eval_loss": 1.152724027633667,
"eval_runtime": 246.7059,
"eval_samples_per_second": 62.735,
"eval_steps_per_second": 7.843,
"step": 5805
},
{
"epoch": 3.1,
"learning_rate": 6.917098445595856e-06,
"loss": 1.0974,
"step": 6000
},
{
"epoch": 3.36,
"learning_rate": 6.658031088082902e-06,
"loss": 0.9875,
"step": 6500
},
{
"epoch": 3.62,
"learning_rate": 6.398963730569949e-06,
"loss": 0.9223,
"step": 7000
},
{
"epoch": 3.88,
"learning_rate": 6.139896373056995e-06,
"loss": 0.8759,
"step": 7500
},
{
"epoch": 4.0,
"eval_accuracy": 0.8385345997286295,
"eval_loss": 0.8977574110031128,
"eval_runtime": 218.7108,
"eval_samples_per_second": 70.765,
"eval_steps_per_second": 8.847,
"step": 7740
},
{
"epoch": 4.13,
"learning_rate": 5.880829015544042e-06,
"loss": 0.809,
"step": 8000
},
{
"epoch": 4.39,
"learning_rate": 5.621761658031088e-06,
"loss": 0.7397,
"step": 8500
},
{
"epoch": 4.65,
"learning_rate": 5.3626943005181356e-06,
"loss": 0.7194,
"step": 9000
},
{
"epoch": 4.91,
"learning_rate": 5.103626943005182e-06,
"loss": 0.6707,
"step": 9500
},
{
"epoch": 5.0,
"eval_accuracy": 0.8570136331330361,
"eval_loss": 0.7434917092323303,
"eval_runtime": 247.4876,
"eval_samples_per_second": 62.536,
"eval_steps_per_second": 7.819,
"step": 9675
},
{
"epoch": 5.17,
"learning_rate": 4.844559585492228e-06,
"loss": 0.6323,
"step": 10000
},
{
"epoch": 5.43,
"learning_rate": 4.585492227979275e-06,
"loss": 0.6029,
"step": 10500
},
{
"epoch": 5.68,
"learning_rate": 4.326424870466322e-06,
"loss": 0.558,
"step": 11000
},
{
"epoch": 5.94,
"learning_rate": 4.067357512953368e-06,
"loss": 0.5433,
"step": 11500
},
{
"epoch": 6.0,
"eval_accuracy": 0.8712928862182594,
"eval_loss": 0.6399914622306824,
"eval_runtime": 221.8619,
"eval_samples_per_second": 69.76,
"eval_steps_per_second": 8.722,
"step": 11610
},
{
"epoch": 6.2,
"learning_rate": 3.808290155440415e-06,
"loss": 0.5238,
"step": 12000
},
{
"epoch": 6.46,
"learning_rate": 3.549222797927461e-06,
"loss": 0.4873,
"step": 12500
},
{
"epoch": 6.72,
"learning_rate": 3.2901554404145083e-06,
"loss": 0.4666,
"step": 13000
},
{
"epoch": 6.98,
"learning_rate": 3.031088082901555e-06,
"loss": 0.4732,
"step": 13500
},
{
"epoch": 7.0,
"eval_accuracy": 0.8785294307682368,
"eval_loss": 0.5811490416526794,
"eval_runtime": 216.0872,
"eval_samples_per_second": 71.624,
"eval_steps_per_second": 8.955,
"step": 13545
},
{
"epoch": 7.24,
"learning_rate": 2.772020725388601e-06,
"loss": 0.4282,
"step": 14000
},
{
"epoch": 7.49,
"learning_rate": 2.512953367875648e-06,
"loss": 0.4227,
"step": 14500
},
{
"epoch": 7.75,
"learning_rate": 2.2538860103626944e-06,
"loss": 0.4143,
"step": 15000
},
{
"epoch": 8.0,
"eval_accuracy": 0.8800801188860891,
"eval_loss": 0.5482025742530823,
"eval_runtime": 199.6943,
"eval_samples_per_second": 77.503,
"eval_steps_per_second": 9.69,
"step": 15480
},
{
"epoch": 8.01,
"learning_rate": 1.994818652849741e-06,
"loss": 0.405,
"step": 15500
},
{
"epoch": 8.27,
"learning_rate": 1.7357512953367877e-06,
"loss": 0.3876,
"step": 16000
},
{
"epoch": 8.53,
"learning_rate": 1.4766839378238342e-06,
"loss": 0.3792,
"step": 16500
},
{
"epoch": 8.79,
"learning_rate": 1.217616580310881e-06,
"loss": 0.3751,
"step": 17000
},
{
"epoch": 9.0,
"eval_accuracy": 0.8842798992052724,
"eval_loss": 0.525115430355072,
"eval_runtime": 235.2802,
"eval_samples_per_second": 65.781,
"eval_steps_per_second": 8.224,
"step": 17415
},
{
"epoch": 9.04,
"learning_rate": 9.585492227979275e-07,
"loss": 0.371,
"step": 17500
},
{
"epoch": 9.3,
"learning_rate": 6.994818652849742e-07,
"loss": 0.3548,
"step": 18000
},
{
"epoch": 9.56,
"learning_rate": 4.404145077720207e-07,
"loss": 0.3594,
"step": 18500
},
{
"epoch": 9.82,
"learning_rate": 1.8134715025906736e-07,
"loss": 0.3606,
"step": 19000
},
{
"epoch": 10.0,
"eval_accuracy": 0.8859598113329457,
"eval_loss": 0.5168846845626831,
"eval_runtime": 201.8296,
"eval_samples_per_second": 76.683,
"eval_steps_per_second": 9.587,
"step": 19350
}
],
"logging_steps": 500,
"max_steps": 19350,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 4.801355152861962e+19,
"trial_name": null,
"trial_params": null
}