{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 12.77139208173691,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13,
      "learning_rate": 5.9401e-05,
      "loss": 0.4385,
      "step": 100
    },
    {
      "epoch": 0.26,
      "learning_rate": 5.8802000000000004e-05,
      "loss": 0.2426,
      "step": 200
    },
    {
      "epoch": 0.38,
      "learning_rate": 5.8203e-05,
      "loss": 0.2014,
      "step": 300
    },
    {
      "epoch": 0.51,
      "learning_rate": 5.7604e-05,
      "loss": 0.1617,
      "step": 400
    },
    {
      "epoch": 0.64,
      "learning_rate": 5.7005e-05,
      "loss": 0.1427,
      "step": 500
    },
    {
      "epoch": 0.77,
      "learning_rate": 5.6406e-05,
      "loss": 0.1437,
      "step": 600
    },
    {
      "epoch": 0.89,
      "learning_rate": 5.5806999999999996e-05,
      "loss": 0.1378,
      "step": 700
    },
    {
      "epoch": 1.0,
      "eval_accuracy_Fallo adhesivo": 0.9662713706913442,
      "eval_accuracy_Fallo burbuja": 0.36542026187745924,
      "eval_accuracy_Fallo cohesivo": 0.9723729862070237,
      "eval_accuracy_Fallo malla": 0.5530634677886148,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.6756717203211999,
      "eval_iou_Fallo burbuja": 0.33507215062890655,
      "eval_iou_Fallo cohesivo": 0.9038180929432679,
      "eval_iou_Fallo malla": 0.5326998276937392,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.2676931321620941,
      "eval_mean_accuracy": 0.7142820216411104,
      "eval_mean_iou": 0.48945235831742273,
      "eval_overall_accuracy": 0.9121759924788573,
      "eval_runtime": 612.0768,
      "eval_samples_per_second": 1.139,
      "eval_steps_per_second": 0.144,
      "step": 783
    },
    {
      "epoch": 1.02,
      "learning_rate": 5.5208000000000004e-05,
      "loss": 0.1351,
      "step": 800
    },
    {
      "epoch": 1.15,
      "learning_rate": 5.4609000000000005e-05,
      "loss": 0.12,
      "step": 900
    },
    {
      "epoch": 1.28,
      "learning_rate": 5.401e-05,
      "loss": 0.1232,
      "step": 1000
    },
    {
      "epoch": 1.4,
      "learning_rate": 5.3411e-05,
      "loss": 0.0971,
      "step": 1100
    },
    {
      "epoch": 1.53,
      "learning_rate": 5.2812e-05,
      "loss": 0.1259,
      "step": 1200
    },
    {
      "epoch": 1.66,
      "learning_rate": 5.2213e-05,
      "loss": 0.1132,
      "step": 1300
    },
    {
      "epoch": 1.79,
      "learning_rate": 5.1614000000000004e-05,
      "loss": 0.1248,
      "step": 1400
    },
    {
      "epoch": 1.92,
      "learning_rate": 5.1015e-05,
      "loss": 0.1117,
      "step": 1500
    },
    {
      "epoch": 2.0,
      "eval_accuracy_Fallo adhesivo": 0.9704853442322338,
      "eval_accuracy_Fallo burbuja": 0.49737471249900295,
      "eval_accuracy_Fallo cohesivo": 0.9506752018582307,
      "eval_accuracy_Fallo malla": 0.7726879768233803,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.5875544518706911,
      "eval_iou_Fallo burbuja": 0.454946829125604,
      "eval_iou_Fallo cohesivo": 0.921380238456499,
      "eval_iou_Fallo malla": 0.6807990937714024,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.23053236305713654,
      "eval_mean_accuracy": 0.797805808853212,
      "eval_mean_iou": 0.5289361226448392,
      "eval_overall_accuracy": 0.9245744163230756,
      "eval_runtime": 548.4918,
      "eval_samples_per_second": 1.271,
      "eval_steps_per_second": 0.16,
      "step": 1566
    },
    {
      "epoch": 2.04,
      "learning_rate": 5.0416e-05,
      "loss": 0.1014,
      "step": 1600
    },
    {
      "epoch": 2.17,
      "learning_rate": 4.9817e-05,
      "loss": 0.1002,
      "step": 1700
    },
    {
      "epoch": 2.3,
      "learning_rate": 4.9218e-05,
      "loss": 0.1095,
      "step": 1800
    },
    {
      "epoch": 2.43,
      "learning_rate": 4.8619e-05,
      "loss": 0.1212,
      "step": 1900
    },
    {
      "epoch": 2.55,
      "learning_rate": 4.8020000000000004e-05,
      "loss": 0.1181,
      "step": 2000
    },
    {
      "epoch": 2.68,
      "learning_rate": 4.7421000000000006e-05,
      "loss": 0.0797,
      "step": 2100
    },
    {
      "epoch": 2.81,
      "learning_rate": 4.6822e-05,
      "loss": 0.0993,
      "step": 2200
    },
    {
      "epoch": 2.94,
      "learning_rate": 4.6223e-05,
      "loss": 0.0881,
      "step": 2300
    },
    {
      "epoch": 3.0,
      "eval_accuracy_Fallo adhesivo": 0.9389145460314162,
      "eval_accuracy_Fallo burbuja": 0.4974500866070455,
      "eval_accuracy_Fallo cohesivo": 0.9711667867917438,
      "eval_accuracy_Fallo malla": 0.7391436559272196,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.7322969887464521,
      "eval_iou_Fallo burbuja": 0.4394330979860809,
      "eval_iou_Fallo cohesivo": 0.9273266448542613,
      "eval_iou_Fallo malla": 0.6790108345674684,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.20405055582523346,
      "eval_mean_accuracy": 0.7866687688393563,
      "eval_mean_iou": 0.5556135132308525,
      "eval_overall_accuracy": 0.9354244787352022,
      "eval_runtime": 964.4262,
      "eval_samples_per_second": 0.723,
      "eval_steps_per_second": 0.091,
      "step": 2349
    },
    {
      "epoch": 3.07,
      "learning_rate": 4.5624e-05,
      "loss": 0.0922,
      "step": 2400
    },
    {
      "epoch": 3.19,
      "learning_rate": 4.5025000000000003e-05,
      "loss": 0.0835,
      "step": 2500
    },
    {
      "epoch": 3.32,
      "learning_rate": 4.4426000000000005e-05,
      "loss": 0.0826,
      "step": 2600
    },
    {
      "epoch": 3.45,
      "learning_rate": 4.3827e-05,
      "loss": 0.0981,
      "step": 2700
    },
    {
      "epoch": 3.58,
      "learning_rate": 4.3228e-05,
      "loss": 0.089,
      "step": 2800
    },
    {
      "epoch": 3.7,
      "learning_rate": 4.2629e-05,
      "loss": 0.0792,
      "step": 2900
    },
    {
      "epoch": 3.83,
      "learning_rate": 4.203e-05,
      "loss": 0.0879,
      "step": 3000
    },
    {
      "epoch": 3.96,
      "learning_rate": 4.1431e-05,
      "loss": 0.0878,
      "step": 3100
    },
    {
      "epoch": 4.0,
      "eval_accuracy_Fallo adhesivo": 0.9601651650339057,
      "eval_accuracy_Fallo burbuja": 0.46057019415053013,
      "eval_accuracy_Fallo cohesivo": 0.9556004227780159,
      "eval_accuracy_Fallo malla": 0.8246678902521215,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.7373161209588962,
      "eval_iou_Fallo burbuja": 0.4351806715745556,
      "eval_iou_Fallo cohesivo": 0.9261166583488046,
      "eval_iou_Fallo malla": 0.6934883209443176,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.19842571020126343,
      "eval_mean_accuracy": 0.8002509180536432,
      "eval_mean_iou": 0.5584203543653148,
      "eval_overall_accuracy": 0.9346409949517239,
      "eval_runtime": 965.3379,
      "eval_samples_per_second": 0.722,
      "eval_steps_per_second": 0.091,
      "step": 3132
    },
    {
      "epoch": 4.09,
      "learning_rate": 4.0832e-05,
      "loss": 0.0891,
      "step": 3200
    },
    {
      "epoch": 4.21,
      "learning_rate": 4.0233e-05,
      "loss": 0.0859,
      "step": 3300
    },
    {
      "epoch": 4.34,
      "learning_rate": 3.9634e-05,
      "loss": 0.0836,
      "step": 3400
    },
    {
      "epoch": 4.47,
      "learning_rate": 3.9035e-05,
      "loss": 0.0791,
      "step": 3500
    },
    {
      "epoch": 4.6,
      "learning_rate": 3.8436e-05,
      "loss": 0.0684,
      "step": 3600
    },
    {
      "epoch": 4.73,
      "learning_rate": 3.7837000000000004e-05,
      "loss": 0.081,
      "step": 3700
    },
    {
      "epoch": 4.85,
      "learning_rate": 3.7238000000000005e-05,
      "loss": 0.082,
      "step": 3800
    },
    {
      "epoch": 4.98,
      "learning_rate": 3.6639e-05,
      "loss": 0.0895,
      "step": 3900
    },
    {
      "epoch": 5.0,
      "eval_accuracy_Fallo adhesivo": 0.9719294262598982,
      "eval_accuracy_Fallo burbuja": 0.46524631599899596,
      "eval_accuracy_Fallo cohesivo": 0.9136890424802744,
      "eval_accuracy_Fallo malla": 0.8834270080176291,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.6592962930147998,
      "eval_iou_Fallo burbuja": 0.4365248402255381,
      "eval_iou_Fallo cohesivo": 0.8963782328935307,
      "eval_iou_Fallo malla": 0.6308508105949046,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.2841300666332245,
      "eval_mean_accuracy": 0.8085729481891994,
      "eval_mean_iou": 0.5246100353457546,
      "eval_overall_accuracy": 0.9087762528940668,
      "eval_runtime": 437.5828,
      "eval_samples_per_second": 1.593,
      "eval_steps_per_second": 0.201,
      "step": 3915
    },
    {
      "epoch": 5.11,
      "learning_rate": 3.604e-05,
      "loss": 0.0769,
      "step": 4000
    },
    {
      "epoch": 5.24,
      "learning_rate": 3.544100000000001e-05,
      "loss": 0.0723,
      "step": 4100
    },
    {
      "epoch": 5.36,
      "learning_rate": 3.4842e-05,
      "loss": 0.0812,
      "step": 4200
    },
    {
      "epoch": 5.49,
      "learning_rate": 3.4243000000000004e-05,
      "loss": 0.0707,
      "step": 4300
    },
    {
      "epoch": 5.62,
      "learning_rate": 3.3644000000000005e-05,
      "loss": 0.0757,
      "step": 4400
    },
    {
      "epoch": 5.75,
      "learning_rate": 3.3045000000000006e-05,
      "loss": 0.079,
      "step": 4500
    },
    {
      "epoch": 5.87,
      "learning_rate": 3.2446e-05,
      "loss": 0.0773,
      "step": 4600
    },
    {
      "epoch": 6.0,
      "eval_accuracy_Fallo adhesivo": 0.9384453125763308,
      "eval_accuracy_Fallo burbuja": 0.529126238458797,
      "eval_accuracy_Fallo cohesivo": 0.9775170075360738,
      "eval_accuracy_Fallo malla": 0.6843044514450518,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.781980107039472,
      "eval_iou_Fallo burbuja": 0.4812632411288181,
      "eval_iou_Fallo cohesivo": 0.9251146577575409,
      "eval_iou_Fallo malla": 0.6377730807830806,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.25467532873153687,
      "eval_mean_accuracy": 0.7823482525040634,
      "eval_mean_iou": 0.5652262173417822,
      "eval_overall_accuracy": 0.9336352918386095,
      "eval_runtime": 510.4141,
      "eval_samples_per_second": 1.366,
      "eval_steps_per_second": 0.172,
      "step": 4698
    },
    {
      "epoch": 6.0,
      "learning_rate": 3.1847e-05,
      "loss": 0.0818,
      "step": 4700
    },
    {
      "epoch": 6.13,
      "learning_rate": 3.1248e-05,
      "loss": 0.0691,
      "step": 4800
    },
    {
      "epoch": 6.26,
      "learning_rate": 3.0649000000000004e-05,
      "loss": 0.0776,
      "step": 4900
    },
    {
      "epoch": 6.39,
      "learning_rate": 3.0050000000000002e-05,
      "loss": 0.0679,
      "step": 5000
    },
    {
      "epoch": 6.51,
      "learning_rate": 2.9451e-05,
      "loss": 0.0664,
      "step": 5100
    },
    {
      "epoch": 6.64,
      "learning_rate": 2.8851999999999998e-05,
      "loss": 0.0719,
      "step": 5200
    },
    {
      "epoch": 6.77,
      "learning_rate": 2.8253e-05,
      "loss": 0.0647,
      "step": 5300
    },
    {
      "epoch": 6.9,
      "learning_rate": 2.7653999999999996e-05,
      "loss": 0.0667,
      "step": 5400
    },
    {
      "epoch": 7.0,
      "eval_accuracy_Fallo adhesivo": 0.9689390593013384,
      "eval_accuracy_Fallo burbuja": 0.5689076681812227,
      "eval_accuracy_Fallo cohesivo": 0.9741324025661836,
      "eval_accuracy_Fallo malla": 0.6609101336050125,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.7547767945125146,
      "eval_iou_Fallo burbuja": 0.5093440431888464,
      "eval_iou_Fallo cohesivo": 0.9203160923411682,
      "eval_iou_Fallo malla": 0.6202328609065462,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.27260345220565796,
      "eval_mean_accuracy": 0.7932223159134393,
      "eval_mean_iou": 0.5609339581898151,
      "eval_overall_accuracy": 0.9295240840483698,
      "eval_runtime": 558.5673,
      "eval_samples_per_second": 1.248,
      "eval_steps_per_second": 0.158,
      "step": 5481
    },
    {
      "epoch": 7.02,
      "learning_rate": 2.7054999999999998e-05,
      "loss": 0.0729,
      "step": 5500
    },
    {
      "epoch": 7.15,
      "learning_rate": 2.6455999999999995e-05,
      "loss": 0.065,
      "step": 5600
    },
    {
      "epoch": 7.28,
      "learning_rate": 2.5857e-05,
      "loss": 0.0668,
      "step": 5700
    },
    {
      "epoch": 7.41,
      "learning_rate": 2.5258e-05,
      "loss": 0.0674,
      "step": 5800
    },
    {
      "epoch": 7.54,
      "learning_rate": 2.4659e-05,
      "loss": 0.0647,
      "step": 5900
    },
    {
      "epoch": 7.66,
      "learning_rate": 2.406e-05,
      "loss": 0.0617,
      "step": 6000
    },
    {
      "epoch": 7.79,
      "learning_rate": 2.3460999999999998e-05,
      "loss": 0.0806,
      "step": 6100
    },
    {
      "epoch": 7.92,
      "learning_rate": 2.2862e-05,
      "loss": 0.0678,
      "step": 6200
    },
    {
      "epoch": 8.0,
      "eval_accuracy_Fallo adhesivo": 0.9713176229687334,
      "eval_accuracy_Fallo burbuja": 0.5291833178804215,
      "eval_accuracy_Fallo cohesivo": 0.9443325703194282,
      "eval_accuracy_Fallo malla": 0.7561045015815989,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.5900108654281505,
      "eval_iou_Fallo burbuja": 0.4821980835783872,
      "eval_iou_Fallo cohesivo": 0.9088805764492143,
      "eval_iou_Fallo malla": 0.6569526752270272,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.2949639856815338,
      "eval_mean_accuracy": 0.8002345031875455,
      "eval_mean_iou": 0.5276084401365558,
      "eval_overall_accuracy": 0.9175235099027457,
      "eval_runtime": 974.0691,
      "eval_samples_per_second": 0.716,
      "eval_steps_per_second": 0.09,
      "step": 6264
    },
    {
      "epoch": 8.05,
      "learning_rate": 2.2263e-05,
      "loss": 0.0623,
      "step": 6300
    },
    {
      "epoch": 8.17,
      "learning_rate": 2.1663999999999998e-05,
      "loss": 0.068,
      "step": 6400
    },
    {
      "epoch": 8.3,
      "learning_rate": 2.1065e-05,
      "loss": 0.0653,
      "step": 6500
    },
    {
      "epoch": 8.43,
      "learning_rate": 2.0465999999999997e-05,
      "loss": 0.0576,
      "step": 6600
    },
    {
      "epoch": 8.56,
      "learning_rate": 1.9866999999999998e-05,
      "loss": 0.0642,
      "step": 6700
    },
    {
      "epoch": 8.68,
      "learning_rate": 1.9267999999999996e-05,
      "loss": 0.0623,
      "step": 6800
    },
    {
      "epoch": 8.81,
      "learning_rate": 1.8669e-05,
      "loss": 0.0606,
      "step": 6900
    },
    {
      "epoch": 8.94,
      "learning_rate": 1.807e-05,
      "loss": 0.0653,
      "step": 7000
    },
    {
      "epoch": 9.0,
      "eval_accuracy_Fallo adhesivo": 0.9640849999048677,
      "eval_accuracy_Fallo burbuja": 0.44252943797991534,
      "eval_accuracy_Fallo cohesivo": 0.9689748435400354,
      "eval_accuracy_Fallo malla": 0.6971461749611119,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.7587978992012424,
      "eval_iou_Fallo burbuja": 0.42281857139560947,
      "eval_iou_Fallo cohesivo": 0.9188514484377671,
      "eval_iou_Fallo malla": 0.6329732025381335,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.2711799442768097,
      "eval_mean_accuracy": 0.7681838640964825,
      "eval_mean_iou": 0.5466882243145504,
      "eval_overall_accuracy": 0.9288374770316719,
      "eval_runtime": 975.2422,
      "eval_samples_per_second": 0.715,
      "eval_steps_per_second": 0.09,
      "step": 7047
    },
    {
      "epoch": 9.07,
      "learning_rate": 1.7471e-05,
      "loss": 0.0596,
      "step": 7100
    },
    {
      "epoch": 9.2,
      "learning_rate": 1.6872e-05,
      "loss": 0.0622,
      "step": 7200
    },
    {
      "epoch": 9.32,
      "learning_rate": 1.6272999999999998e-05,
      "loss": 0.0587,
      "step": 7300
    },
    {
      "epoch": 9.45,
      "learning_rate": 1.5674e-05,
      "loss": 0.0618,
      "step": 7400
    },
    {
      "epoch": 9.58,
      "learning_rate": 1.5075000000000002e-05,
      "loss": 0.0608,
      "step": 7500
    },
    {
      "epoch": 9.71,
      "learning_rate": 1.4476e-05,
      "loss": 0.0601,
      "step": 7600
    },
    {
      "epoch": 9.83,
      "learning_rate": 1.3877e-05,
      "loss": 0.0597,
      "step": 7700
    },
    {
      "epoch": 9.96,
      "learning_rate": 1.3277999999999999e-05,
      "loss": 0.0646,
      "step": 7800
    },
    {
      "epoch": 10.0,
      "eval_accuracy_Fallo adhesivo": 0.9688121734327988,
      "eval_accuracy_Fallo burbuja": 0.5067723270182515,
      "eval_accuracy_Fallo cohesivo": 0.9681219108003379,
      "eval_accuracy_Fallo malla": 0.684018001766896,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.7344708727413036,
      "eval_iou_Fallo burbuja": 0.47277934419738255,
      "eval_iou_Fallo cohesivo": 0.9177986519417454,
      "eval_iou_Fallo malla": 0.6242755098693177,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.28413864970207214,
      "eval_mean_accuracy": 0.781931103254571,
      "eval_mean_iou": 0.5498648757499499,
      "eval_overall_accuracy": 0.9271530652326141,
      "eval_runtime": 962.2359,
      "eval_samples_per_second": 0.724,
      "eval_steps_per_second": 0.091,
      "step": 7830
    },
    {
      "epoch": 10.09,
      "learning_rate": 1.2678999999999998e-05,
      "loss": 0.0535,
      "step": 7900
    },
    {
      "epoch": 10.22,
      "learning_rate": 1.2079999999999998e-05,
      "loss": 0.0654,
      "step": 8000
    },
    {
      "epoch": 10.34,
      "learning_rate": 1.1480999999999997e-05,
      "loss": 0.0544,
      "step": 8100
    },
    {
      "epoch": 10.47,
      "learning_rate": 1.0882000000000004e-05,
      "loss": 0.059,
      "step": 8200
    },
    {
      "epoch": 10.6,
      "learning_rate": 1.0283000000000003e-05,
      "loss": 0.0593,
      "step": 8300
    },
    {
      "epoch": 10.73,
      "learning_rate": 9.684000000000002e-06,
      "loss": 0.0591,
      "step": 8400
    },
    {
      "epoch": 10.86,
      "learning_rate": 9.085000000000002e-06,
      "loss": 0.0548,
      "step": 8500
    },
    {
      "epoch": 10.98,
      "learning_rate": 8.486000000000001e-06,
      "loss": 0.057,
      "step": 8600
    },
    {
      "epoch": 11.0,
      "eval_accuracy_Fallo adhesivo": 0.9739099771116919,
      "eval_accuracy_Fallo burbuja": 0.524183014188627,
      "eval_accuracy_Fallo cohesivo": 0.9592554275269433,
      "eval_accuracy_Fallo malla": 0.655457223140754,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.6319458306810046,
      "eval_iou_Fallo burbuja": 0.48480026747414257,
      "eval_iou_Fallo cohesivo": 0.9074842927285548,
      "eval_iou_Fallo malla": 0.604024431195969,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.33727791905403137,
      "eval_mean_accuracy": 0.7782014104920041,
      "eval_mean_iou": 0.5256509644159343,
      "eval_overall_accuracy": 0.9165530753985491,
      "eval_runtime": 968.6795,
      "eval_samples_per_second": 0.72,
      "eval_steps_per_second": 0.091,
      "step": 8613
    },
    {
      "epoch": 11.11,
      "learning_rate": 7.887000000000001e-06,
      "loss": 0.0624,
      "step": 8700
    },
    {
      "epoch": 11.24,
      "learning_rate": 7.2879999999999995e-06,
      "loss": 0.0533,
      "step": 8800
    },
    {
      "epoch": 11.37,
      "learning_rate": 6.688999999999999e-06,
      "loss": 0.0525,
      "step": 8900
    },
    {
      "epoch": 11.49,
      "learning_rate": 6.0899999999999984e-06,
      "loss": 0.0543,
      "step": 9000
    },
    {
      "epoch": 11.62,
      "learning_rate": 5.490999999999998e-06,
      "loss": 0.0549,
      "step": 9100
    },
    {
      "epoch": 11.75,
      "learning_rate": 4.891999999999997e-06,
      "loss": 0.0628,
      "step": 9200
    },
    {
      "epoch": 11.88,
      "learning_rate": 4.292999999999997e-06,
      "loss": 0.0591,
      "step": 9300
    },
    {
      "epoch": 12.0,
      "eval_accuracy_Fallo adhesivo": 0.970520311766421,
      "eval_accuracy_Fallo burbuja": 0.5462910450437133,
      "eval_accuracy_Fallo cohesivo": 0.9656092838621118,
      "eval_accuracy_Fallo malla": 0.6775555839377438,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.718221180704075,
      "eval_iou_Fallo burbuja": 0.5019128082176123,
      "eval_iou_Fallo cohesivo": 0.9148455041266497,
      "eval_iou_Fallo malla": 0.6171864831472653,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.3082430958747864,
      "eval_mean_accuracy": 0.7899940561524975,
      "eval_mean_iou": 0.5504331952391205,
      "eval_overall_accuracy": 0.9246537176842095,
      "eval_runtime": 968.1482,
      "eval_samples_per_second": 0.72,
      "eval_steps_per_second": 0.091,
      "step": 9396
    },
    {
      "epoch": 12.01,
      "learning_rate": 3.694000000000003e-06,
      "loss": 0.0577,
      "step": 9400
    },
    {
      "epoch": 12.13,
      "learning_rate": 3.0950000000000026e-06,
      "loss": 0.0559,
      "step": 9500
    },
    {
      "epoch": 12.26,
      "learning_rate": 2.496000000000002e-06,
      "loss": 0.0492,
      "step": 9600
    },
    {
      "epoch": 12.39,
      "learning_rate": 1.8970000000000013e-06,
      "loss": 0.05,
      "step": 9700
    },
    {
      "epoch": 12.52,
      "learning_rate": 1.298000000000001e-06,
      "loss": 0.0584,
      "step": 9800
    },
    {
      "epoch": 12.64,
      "learning_rate": 6.990000000000005e-07,
      "loss": 0.0543,
      "step": 9900
    },
    {
      "epoch": 12.77,
      "learning_rate": 1e-07,
      "loss": 0.053,
      "step": 10000
    },
    {
      "epoch": 12.77,
      "eval_accuracy_Fallo adhesivo": 0.9727257090053227,
      "eval_accuracy_Fallo burbuja": 0.5291350199082777,
      "eval_accuracy_Fallo cohesivo": 0.9668179190509752,
      "eval_accuracy_Fallo malla": 0.6807574657546112,
      "eval_accuracy_bg": NaN,
      "eval_iou_Fallo adhesivo": 0.730670019716971,
      "eval_iou_Fallo burbuja": 0.49027314919974097,
      "eval_iou_Fallo cohesivo": 0.9167237256674108,
      "eval_iou_Fallo malla": 0.6189086162075905,
      "eval_iou_bg": 0.0,
      "eval_loss": 0.3095340430736542,
      "eval_mean_accuracy": 0.7873590284297967,
      "eval_mean_iou": 0.5513151021583427,
      "eval_overall_accuracy": 0.9260218134108001,
      "eval_runtime": 971.1054,
      "eval_samples_per_second": 0.718,
      "eval_steps_per_second": 0.091,
      "step": 10000
    },
    {
      "epoch": 12.77,
      "step": 10000,
      "total_flos": 1.4021067262320968e+18,
      "train_loss": 0.08475520811080933,
      "train_runtime": 70131.561,
      "train_samples_per_second": 1.141,
      "train_steps_per_second": 0.143
    }
  ],
  "max_steps": 10000,
  "num_train_epochs": 13,
  "total_flos": 1.4021067262320968e+18,
  "trial_name": null,
  "trial_params": null
}