|
{ |
|
"best_metric": 0.01912616565823555, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/parking-terrain/checkpoint-560", |
|
"epoch": 140.0, |
|
"global_step": 560, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 4.1666666666666667e-07, |
|
"loss": 1.3152, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 8.333333333333333e-07, |
|
"loss": 1.3189, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.25e-06, |
|
"loss": 1.3046, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.6666666666666667e-06, |
|
"loss": 1.3124, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 2.0833333333333334e-06, |
|
"loss": 1.3014, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 2.5e-06, |
|
"loss": 1.2884, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 2.916666666666667e-06, |
|
"loss": 1.2836, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 1.266, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 3.75e-06, |
|
"loss": 1.2544, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 4.166666666666667e-06, |
|
"loss": 1.2427, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 4.583333333333333e-06, |
|
"loss": 1.2271, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 5e-06, |
|
"loss": 1.2266, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 5.416666666666667e-06, |
|
"loss": 1.2002, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 5.833333333333334e-06, |
|
"loss": 1.1886, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 6.25e-06, |
|
"loss": 1.1605, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 6.666666666666667e-06, |
|
"loss": 1.1389, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 4.25, |
|
"learning_rate": 7.083333333333334e-06, |
|
"loss": 1.1115, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 7.5e-06, |
|
"loss": 1.0863, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 4.75, |
|
"learning_rate": 7.916666666666667e-06, |
|
"loss": 1.0763, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 1.0322, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_ELSE": 0.7236194227710022, |
|
"eval_accuracy_road": 0.9371333371304893, |
|
"eval_accuracy_sidewalk": 0.5361869895803999, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.7051860054312004, |
|
"eval_iou_road": 0.6015263687048716, |
|
"eval_iou_sidewalk": 0.40198458777578383, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.1061346530914307, |
|
"eval_mean_accuracy": 0.7323132498272971, |
|
"eval_mean_iou": 0.42717424047796393, |
|
"eval_overall_accuracy": 0.7757415771484375, |
|
"eval_runtime": 0.7193, |
|
"eval_samples_per_second": 1.39, |
|
"eval_steps_per_second": 1.39, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 5.25, |
|
"learning_rate": 8.75e-06, |
|
"loss": 1.0201, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 9.166666666666666e-06, |
|
"loss": 0.9859, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"learning_rate": 9.583333333333334e-06, |
|
"loss": 0.9649, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 1e-05, |
|
"loss": 0.9171, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 1.0416666666666668e-05, |
|
"loss": 0.8769, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 1.0833333333333334e-05, |
|
"loss": 0.882, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 6.75, |
|
"learning_rate": 1.125e-05, |
|
"loss": 0.8217, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 1.1666666666666668e-05, |
|
"loss": 0.7833, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 7.25, |
|
"learning_rate": 1.2083333333333333e-05, |
|
"loss": 0.7459, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 1.25e-05, |
|
"loss": 0.7156, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"learning_rate": 1.2916666666666668e-05, |
|
"loss": 0.6706, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 0.6556, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 8.25, |
|
"learning_rate": 1.3750000000000002e-05, |
|
"loss": 0.6085, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 1.4166666666666668e-05, |
|
"loss": 0.5801, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 8.75, |
|
"learning_rate": 1.4583333333333335e-05, |
|
"loss": 0.5008, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 1.5e-05, |
|
"loss": 0.5359, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 9.25, |
|
"learning_rate": 1.5416666666666668e-05, |
|
"loss": 0.4526, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 1.5833333333333333e-05, |
|
"loss": 0.461, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 9.75, |
|
"learning_rate": 1.6250000000000002e-05, |
|
"loss": 0.3945, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.3821, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_ELSE": 0.9475472637354313, |
|
"eval_accuracy_road": 0.9957851569174688, |
|
"eval_accuracy_sidewalk": 0.7203604618417347, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9428740645022344, |
|
"eval_iou_road": 0.876176157363904, |
|
"eval_iou_sidewalk": 0.5966180758017493, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.332696795463562, |
|
"eval_mean_accuracy": 0.8878976274982117, |
|
"eval_mean_iou": 0.805222765889296, |
|
"eval_overall_accuracy": 0.954315185546875, |
|
"eval_runtime": 0.6036, |
|
"eval_samples_per_second": 1.657, |
|
"eval_steps_per_second": 1.657, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 10.25, |
|
"learning_rate": 1.7083333333333333e-05, |
|
"loss": 0.3487, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 1.75e-05, |
|
"loss": 0.3294, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 10.75, |
|
"learning_rate": 1.7916666666666667e-05, |
|
"loss": 0.3092, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 1.8333333333333333e-05, |
|
"loss": 0.2778, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 11.25, |
|
"learning_rate": 1.8750000000000002e-05, |
|
"loss": 0.2508, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 1.9166666666666667e-05, |
|
"loss": 0.2453, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 11.75, |
|
"learning_rate": 1.9583333333333333e-05, |
|
"loss": 0.2169, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 2e-05, |
|
"loss": 0.2321, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 12.25, |
|
"learning_rate": 2.0416666666666667e-05, |
|
"loss": 0.2083, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 2.0833333333333336e-05, |
|
"loss": 0.1772, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 12.75, |
|
"learning_rate": 2.125e-05, |
|
"loss": 0.1643, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 2.1666666666666667e-05, |
|
"loss": 0.1781, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 13.25, |
|
"learning_rate": 2.2083333333333333e-05, |
|
"loss": 0.1629, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 2.25e-05, |
|
"loss": 0.1367, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 13.75, |
|
"learning_rate": 2.2916666666666667e-05, |
|
"loss": 0.1678, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 2.3333333333333336e-05, |
|
"loss": 0.1151, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 14.25, |
|
"learning_rate": 2.375e-05, |
|
"loss": 0.1134, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 2.4166666666666667e-05, |
|
"loss": 0.113, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 14.75, |
|
"learning_rate": 2.4583333333333332e-05, |
|
"loss": 0.1298, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.1146, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_ELSE": 0.9833345958639497, |
|
"eval_accuracy_road": 0.9951443868542462, |
|
"eval_accuracy_sidewalk": 0.7050126724866235, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9793074460861975, |
|
"eval_iou_road": 0.9435900897859988, |
|
"eval_iou_sidewalk": 0.6242363795038025, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.09062469005584717, |
|
"eval_mean_accuracy": 0.8944972184016065, |
|
"eval_mean_iou": 0.8490446384586662, |
|
"eval_overall_accuracy": 0.9789581298828125, |
|
"eval_runtime": 0.5877, |
|
"eval_samples_per_second": 1.701, |
|
"eval_steps_per_second": 1.701, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 15.25, |
|
"learning_rate": 2.5416666666666667e-05, |
|
"loss": 0.1079, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 2.5833333333333336e-05, |
|
"loss": 0.0953, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 15.75, |
|
"learning_rate": 2.625e-05, |
|
"loss": 0.094, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 2.6666666666666667e-05, |
|
"loss": 0.1031, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 16.25, |
|
"learning_rate": 2.7083333333333332e-05, |
|
"loss": 0.0991, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 2.7500000000000004e-05, |
|
"loss": 0.09, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 16.75, |
|
"learning_rate": 2.791666666666667e-05, |
|
"loss": 0.0938, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 2.8333333333333335e-05, |
|
"loss": 0.0784, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 17.25, |
|
"learning_rate": 2.8749999999999997e-05, |
|
"loss": 0.074, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 2.916666666666667e-05, |
|
"loss": 0.0754, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 17.75, |
|
"learning_rate": 2.9583333333333335e-05, |
|
"loss": 0.0908, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 3e-05, |
|
"loss": 0.0746, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 18.25, |
|
"learning_rate": 3.0416666666666666e-05, |
|
"loss": 0.0705, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 3.0833333333333335e-05, |
|
"loss": 0.0786, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 18.75, |
|
"learning_rate": 3.125e-05, |
|
"loss": 0.0759, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 3.1666666666666666e-05, |
|
"loss": 0.0743, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 19.25, |
|
"learning_rate": 3.208333333333334e-05, |
|
"loss": 0.0665, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 3.2500000000000004e-05, |
|
"loss": 0.0642, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 19.75, |
|
"learning_rate": 3.291666666666667e-05, |
|
"loss": 0.0707, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.0668, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_ELSE": 0.9925168006752735, |
|
"eval_accuracy_road": 0.9965967989975508, |
|
"eval_accuracy_sidewalk": 0.6823430019712757, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9843781863455368, |
|
"eval_iou_road": 0.9669927325983033, |
|
"eval_iou_sidewalk": 0.663653793481238, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.05583948642015457, |
|
"eval_mean_accuracy": 0.8904855338813666, |
|
"eval_mean_iou": 0.8716749041416927, |
|
"eval_overall_accuracy": 0.9852066040039062, |
|
"eval_runtime": 0.5774, |
|
"eval_samples_per_second": 1.732, |
|
"eval_steps_per_second": 1.732, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 20.25, |
|
"learning_rate": 3.375000000000001e-05, |
|
"loss": 0.0614, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 3.4166666666666666e-05, |
|
"loss": 0.0692, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 20.75, |
|
"learning_rate": 3.458333333333333e-05, |
|
"loss": 0.0609, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 3.5e-05, |
|
"loss": 0.085, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 21.25, |
|
"learning_rate": 3.541666666666667e-05, |
|
"loss": 0.0666, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 3.5833333333333335e-05, |
|
"loss": 0.059, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 21.75, |
|
"learning_rate": 3.625e-05, |
|
"loss": 0.0564, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 3.6666666666666666e-05, |
|
"loss": 0.0598, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 22.25, |
|
"learning_rate": 3.708333333333334e-05, |
|
"loss": 0.0542, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 3.7500000000000003e-05, |
|
"loss": 0.0556, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 22.75, |
|
"learning_rate": 3.791666666666667e-05, |
|
"loss": 0.0584, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 3.8333333333333334e-05, |
|
"loss": 0.0563, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 23.25, |
|
"learning_rate": 3.875e-05, |
|
"loss": 0.0526, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 3.9166666666666665e-05, |
|
"loss": 0.0595, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 23.75, |
|
"learning_rate": 3.958333333333333e-05, |
|
"loss": 0.0521, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 4e-05, |
|
"loss": 0.0637, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 24.25, |
|
"learning_rate": 4.041666666666667e-05, |
|
"loss": 0.0526, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 4.0833333333333334e-05, |
|
"loss": 0.0589, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 24.75, |
|
"learning_rate": 4.125e-05, |
|
"loss": 0.0513, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 4.166666666666667e-05, |
|
"loss": 0.0491, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_ELSE": 0.9903145865572954, |
|
"eval_accuracy_road": 0.9915560745001993, |
|
"eval_accuracy_sidewalk": 0.8296254576175726, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9854569928657962, |
|
"eval_iou_road": 0.9708338561490094, |
|
"eval_iou_sidewalk": 0.7111647555823778, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.047440506517887115, |
|
"eval_mean_accuracy": 0.937165372891689, |
|
"eval_mean_iou": 0.8891518681990611, |
|
"eval_overall_accuracy": 0.9862937927246094, |
|
"eval_runtime": 0.7221, |
|
"eval_samples_per_second": 1.385, |
|
"eval_steps_per_second": 1.385, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 25.25, |
|
"learning_rate": 4.208333333333334e-05, |
|
"loss": 0.0548, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 4.25e-05, |
|
"loss": 0.0491, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 25.75, |
|
"learning_rate": 4.291666666666667e-05, |
|
"loss": 0.0552, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"loss": 0.0476, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 26.25, |
|
"learning_rate": 4.375e-05, |
|
"loss": 0.051, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 4.4166666666666665e-05, |
|
"loss": 0.0465, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 26.75, |
|
"learning_rate": 4.458333333333334e-05, |
|
"loss": 0.0502, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 4.5e-05, |
|
"loss": 0.0479, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 27.25, |
|
"learning_rate": 4.541666666666667e-05, |
|
"loss": 0.0526, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 4.5833333333333334e-05, |
|
"loss": 0.0471, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 27.75, |
|
"learning_rate": 4.6250000000000006e-05, |
|
"loss": 0.0405, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 4.666666666666667e-05, |
|
"loss": 0.0449, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 28.25, |
|
"learning_rate": 4.708333333333334e-05, |
|
"loss": 0.0443, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 4.75e-05, |
|
"loss": 0.0406, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 28.75, |
|
"learning_rate": 4.791666666666667e-05, |
|
"loss": 0.0436, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 4.8333333333333334e-05, |
|
"loss": 0.052, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 29.25, |
|
"learning_rate": 4.875e-05, |
|
"loss": 0.0468, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 4.9166666666666665e-05, |
|
"loss": 0.0402, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 29.75, |
|
"learning_rate": 4.958333333333334e-05, |
|
"loss": 0.0456, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 5e-05, |
|
"loss": 0.04, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_ELSE": 0.9936638999210017, |
|
"eval_accuracy_road": 0.9914136811528166, |
|
"eval_accuracy_sidewalk": 0.877640101379893, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9889762507404815, |
|
"eval_iou_road": 0.9755362822434882, |
|
"eval_iou_sidewalk": 0.8067564069376133, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.03659357875585556, |
|
"eval_mean_accuracy": 0.9542392274845705, |
|
"eval_mean_iou": 0.9237563133071943, |
|
"eval_overall_accuracy": 0.9899177551269531, |
|
"eval_runtime": 0.6094, |
|
"eval_samples_per_second": 1.641, |
|
"eval_steps_per_second": 1.641, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 30.25, |
|
"learning_rate": 4.99780701754386e-05, |
|
"loss": 0.0374, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 4.9956140350877195e-05, |
|
"loss": 0.0435, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 30.75, |
|
"learning_rate": 4.9934210526315795e-05, |
|
"loss": 0.0442, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 4.991228070175439e-05, |
|
"loss": 0.0387, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 31.25, |
|
"learning_rate": 4.989035087719299e-05, |
|
"loss": 0.0422, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 4.986842105263158e-05, |
|
"loss": 0.0398, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 31.75, |
|
"learning_rate": 4.984649122807018e-05, |
|
"loss": 0.0428, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 4.9824561403508773e-05, |
|
"loss": 0.0375, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 32.25, |
|
"learning_rate": 4.980263157894737e-05, |
|
"loss": 0.0447, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 4.9780701754385966e-05, |
|
"loss": 0.0358, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 32.75, |
|
"learning_rate": 4.9758771929824566e-05, |
|
"loss": 0.0357, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 4.973684210526316e-05, |
|
"loss": 0.0369, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 33.25, |
|
"learning_rate": 4.971491228070176e-05, |
|
"loss": 0.0333, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 4.969298245614035e-05, |
|
"loss": 0.0381, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 33.75, |
|
"learning_rate": 4.967105263157895e-05, |
|
"loss": 0.036, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 4.9649122807017544e-05, |
|
"loss": 0.0396, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 34.25, |
|
"learning_rate": 4.9627192982456144e-05, |
|
"loss": 0.0376, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 4.960526315789474e-05, |
|
"loss": 0.0388, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 34.75, |
|
"learning_rate": 4.958333333333334e-05, |
|
"loss": 0.0311, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 4.956140350877193e-05, |
|
"loss": 0.0339, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_ELSE": 0.9937234192214875, |
|
"eval_accuracy_road": 0.9900751836874181, |
|
"eval_accuracy_sidewalk": 0.9129822585187272, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9891420261754726, |
|
"eval_iou_road": 0.9801381449111926, |
|
"eval_iou_sidewalk": 0.8095892121363466, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.03316768258810043, |
|
"eval_mean_accuracy": 0.9655936204758776, |
|
"eval_mean_iou": 0.9262897944076706, |
|
"eval_overall_accuracy": 0.9905586242675781, |
|
"eval_runtime": 0.6199, |
|
"eval_samples_per_second": 1.613, |
|
"eval_steps_per_second": 1.613, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 35.25, |
|
"learning_rate": 4.953947368421053e-05, |
|
"loss": 0.0351, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 4.951754385964912e-05, |
|
"loss": 0.0318, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 35.75, |
|
"learning_rate": 4.949561403508772e-05, |
|
"loss": 0.0354, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 4.9473684210526315e-05, |
|
"loss": 0.0385, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 36.25, |
|
"learning_rate": 4.9451754385964915e-05, |
|
"loss": 0.0342, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 4.942982456140351e-05, |
|
"loss": 0.033, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 36.75, |
|
"learning_rate": 4.940789473684211e-05, |
|
"loss": 0.0337, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 4.93859649122807e-05, |
|
"loss": 0.0324, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 37.25, |
|
"learning_rate": 4.93640350877193e-05, |
|
"loss": 0.0328, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 4.9342105263157894e-05, |
|
"loss": 0.0363, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 37.75, |
|
"learning_rate": 4.932017543859649e-05, |
|
"loss": 0.0309, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 4.9298245614035086e-05, |
|
"loss": 0.034, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 38.25, |
|
"learning_rate": 4.9276315789473686e-05, |
|
"loss": 0.0297, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 4.925438596491228e-05, |
|
"loss": 0.031, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 38.75, |
|
"learning_rate": 4.923245614035088e-05, |
|
"loss": 0.0308, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 4.921052631578947e-05, |
|
"loss": 0.0368, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 39.25, |
|
"learning_rate": 4.918859649122807e-05, |
|
"loss": 0.032, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 4.9166666666666665e-05, |
|
"loss": 0.0336, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 39.75, |
|
"learning_rate": 4.9144736842105264e-05, |
|
"loss": 0.0285, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 4.912280701754386e-05, |
|
"loss": 0.0341, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_ELSE": 0.9941238217883926, |
|
"eval_accuracy_road": 0.9888790795694025, |
|
"eval_accuracy_sidewalk": 0.9129822585187272, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9896685088824968, |
|
"eval_iou_road": 0.978181869401093, |
|
"eval_iou_sidewalk": 0.8118192062100914, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.03096950612962246, |
|
"eval_mean_accuracy": 0.9653283866255075, |
|
"eval_mean_iou": 0.9265565281645604, |
|
"eval_overall_accuracy": 0.9905204772949219, |
|
"eval_runtime": 0.6028, |
|
"eval_samples_per_second": 1.659, |
|
"eval_steps_per_second": 1.659, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 40.25, |
|
"learning_rate": 4.910087719298246e-05, |
|
"loss": 0.0302, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 4.907894736842106e-05, |
|
"loss": 0.0324, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 40.75, |
|
"learning_rate": 4.905701754385965e-05, |
|
"loss": 0.0301, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 4.903508771929825e-05, |
|
"loss": 0.0294, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 41.25, |
|
"learning_rate": 4.901315789473684e-05, |
|
"loss": 0.031, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 4.899122807017544e-05, |
|
"loss": 0.0311, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 41.75, |
|
"learning_rate": 4.8969298245614035e-05, |
|
"loss": 0.0287, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 4.8947368421052635e-05, |
|
"loss": 0.0281, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 42.25, |
|
"learning_rate": 4.892543859649123e-05, |
|
"loss": 0.0295, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 4.890350877192983e-05, |
|
"loss": 0.0336, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 42.75, |
|
"learning_rate": 4.888157894736842e-05, |
|
"loss": 0.0288, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 4.885964912280702e-05, |
|
"loss": 0.0327, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 43.25, |
|
"learning_rate": 4.8837719298245614e-05, |
|
"loss": 0.029, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 4.881578947368421e-05, |
|
"loss": 0.0321, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 43.75, |
|
"learning_rate": 4.8793859649122806e-05, |
|
"loss": 0.0291, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 4.8771929824561406e-05, |
|
"loss": 0.0296, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 44.25, |
|
"learning_rate": 4.875e-05, |
|
"loss": 0.0268, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 4.87280701754386e-05, |
|
"loss": 0.0298, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 44.75, |
|
"learning_rate": 4.870614035087719e-05, |
|
"loss": 0.0286, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 4.868421052631579e-05, |
|
"loss": 0.0295, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_ELSE": 0.9963422684428669, |
|
"eval_accuracy_road": 0.9892920202768126, |
|
"eval_accuracy_sidewalk": 0.916784004505773, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9912789758718332, |
|
"eval_iou_road": 0.9822428320986258, |
|
"eval_iou_sidewalk": 0.8485598853121334, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.02526058815419674, |
|
"eval_mean_accuracy": 0.9674727644084843, |
|
"eval_mean_iou": 0.9406938977608642, |
|
"eval_overall_accuracy": 0.9922981262207031, |
|
"eval_runtime": 0.6252, |
|
"eval_samples_per_second": 1.6, |
|
"eval_steps_per_second": 1.6, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 45.25, |
|
"learning_rate": 4.8662280701754385e-05, |
|
"loss": 0.0255, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 4.8640350877192984e-05, |
|
"loss": 0.0291, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 45.75, |
|
"learning_rate": 4.861842105263158e-05, |
|
"loss": 0.0353, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 4.859649122807018e-05, |
|
"loss": 0.0296, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 46.25, |
|
"learning_rate": 4.857456140350877e-05, |
|
"loss": 0.0275, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 4.855263157894737e-05, |
|
"loss": 0.0301, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 46.75, |
|
"learning_rate": 4.853070175438596e-05, |
|
"loss": 0.0315, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 4.850877192982456e-05, |
|
"loss": 0.0401, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 47.25, |
|
"learning_rate": 4.8486842105263156e-05, |
|
"loss": 0.0274, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 4.8464912280701755e-05, |
|
"loss": 0.028, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 47.75, |
|
"learning_rate": 4.844298245614035e-05, |
|
"loss": 0.0309, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 4.842105263157895e-05, |
|
"loss": 0.0291, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 48.25, |
|
"learning_rate": 4.839912280701754e-05, |
|
"loss": 0.0273, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 4.837719298245614e-05, |
|
"loss": 0.0302, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 48.75, |
|
"learning_rate": 4.8355263157894734e-05, |
|
"loss": 0.0313, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 4.8333333333333334e-05, |
|
"loss": 0.0268, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 49.25, |
|
"learning_rate": 4.8311403508771927e-05, |
|
"loss": 0.0263, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 4.8289473684210526e-05, |
|
"loss": 0.0262, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 49.75, |
|
"learning_rate": 4.826754385964912e-05, |
|
"loss": 0.0267, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 4.824561403508772e-05, |
|
"loss": 0.058, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_ELSE": 0.9955793392275477, |
|
"eval_accuracy_road": 0.990217577034801, |
|
"eval_accuracy_sidewalk": 0.9076316530554773, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9915928798159056, |
|
"eval_iou_road": 0.980873661791049, |
|
"eval_iou_sidewalk": 0.8211464968152866, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.02616145648062229, |
|
"eval_mean_accuracy": 0.9644761897726086, |
|
"eval_mean_iou": 0.931204346140747, |
|
"eval_overall_accuracy": 0.99176025390625, |
|
"eval_runtime": 0.6133, |
|
"eval_samples_per_second": 1.63, |
|
"eval_steps_per_second": 1.63, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 50.25, |
|
"learning_rate": 4.822368421052631e-05, |
|
"loss": 0.0327, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 4.820175438596491e-05, |
|
"loss": 0.0261, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 50.75, |
|
"learning_rate": 4.817982456140351e-05, |
|
"loss": 0.026, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 4.8157894736842105e-05, |
|
"loss": 0.0241, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 51.25, |
|
"learning_rate": 4.8135964912280704e-05, |
|
"loss": 0.0252, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 4.81140350877193e-05, |
|
"loss": 0.0259, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 51.75, |
|
"learning_rate": 4.80921052631579e-05, |
|
"loss": 0.0293, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 4.807017543859649e-05, |
|
"loss": 0.0289, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 52.25, |
|
"learning_rate": 4.804824561403509e-05, |
|
"loss": 0.0263, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 4.802631578947368e-05, |
|
"loss": 0.0233, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 52.75, |
|
"learning_rate": 4.800438596491228e-05, |
|
"loss": 0.0256, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 4.7982456140350876e-05, |
|
"loss": 0.0285, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 53.25, |
|
"learning_rate": 4.7960526315789475e-05, |
|
"loss": 0.0247, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 4.793859649122807e-05, |
|
"loss": 0.024, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 53.75, |
|
"learning_rate": 4.791666666666667e-05, |
|
"loss": 0.0237, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 4.789473684210526e-05, |
|
"loss": 0.0269, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 54.25, |
|
"learning_rate": 4.787280701754386e-05, |
|
"loss": 0.0226, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 4.785087719298246e-05, |
|
"loss": 0.0279, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 54.75, |
|
"learning_rate": 4.7828947368421054e-05, |
|
"loss": 0.0241, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 4.780701754385965e-05, |
|
"loss": 0.0242, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_ELSE": 0.9960013851764477, |
|
"eval_accuracy_road": 0.9922680412371134, |
|
"eval_accuracy_sidewalk": 0.9050971557307801, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.991820812207357, |
|
"eval_iou_road": 0.9837928649075995, |
|
"eval_iou_sidewalk": 0.8376335678915819, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.024843668565154076, |
|
"eval_mean_accuracy": 0.964455527381447, |
|
"eval_mean_iou": 0.9377490816688462, |
|
"eval_overall_accuracy": 0.9925384521484375, |
|
"eval_runtime": 0.6054, |
|
"eval_samples_per_second": 1.652, |
|
"eval_steps_per_second": 1.652, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 55.25, |
|
"learning_rate": 4.7785087719298246e-05, |
|
"loss": 0.0225, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 4.7763157894736846e-05, |
|
"loss": 0.0233, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 55.75, |
|
"learning_rate": 4.774122807017544e-05, |
|
"loss": 0.0305, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 4.771929824561404e-05, |
|
"loss": 0.0237, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 56.25, |
|
"learning_rate": 4.769736842105263e-05, |
|
"loss": 0.0222, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 4.767543859649123e-05, |
|
"loss": 0.0234, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 56.75, |
|
"learning_rate": 4.7653508771929825e-05, |
|
"loss": 0.0232, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 4.7631578947368424e-05, |
|
"loss": 0.0227, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 57.25, |
|
"learning_rate": 4.760964912280702e-05, |
|
"loss": 0.0236, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 4.758771929824562e-05, |
|
"loss": 0.0255, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 57.75, |
|
"learning_rate": 4.756578947368422e-05, |
|
"loss": 0.0216, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 4.754385964912281e-05, |
|
"loss": 0.0236, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 58.25, |
|
"learning_rate": 4.752192982456141e-05, |
|
"loss": 0.0237, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 4.75e-05, |
|
"loss": 0.0232, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 58.75, |
|
"learning_rate": 4.74780701754386e-05, |
|
"loss": 0.0212, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 4.7456140350877195e-05, |
|
"loss": 0.0423, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 59.25, |
|
"learning_rate": 4.7434210526315795e-05, |
|
"loss": 0.0226, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 4.741228070175439e-05, |
|
"loss": 0.023, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 59.75, |
|
"learning_rate": 4.739035087719299e-05, |
|
"loss": 0.0233, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 4.736842105263158e-05, |
|
"loss": 0.0217, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_ELSE": 0.9959797417944528, |
|
"eval_accuracy_road": 0.9896622429800079, |
|
"eval_accuracy_sidewalk": 0.9128414531117995, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9915428165114387, |
|
"eval_iou_road": 0.9828744361008585, |
|
"eval_iou_sidewalk": 0.8229246001523229, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.02502310648560524, |
|
"eval_mean_accuracy": 0.9661611459620868, |
|
"eval_mean_iou": 0.9324472842548733, |
|
"eval_overall_accuracy": 0.992034912109375, |
|
"eval_runtime": 0.6032, |
|
"eval_samples_per_second": 1.658, |
|
"eval_steps_per_second": 1.658, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 60.25, |
|
"learning_rate": 4.734649122807018e-05, |
|
"loss": 0.0313, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 4.7324561403508774e-05, |
|
"loss": 0.0209, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 60.75, |
|
"learning_rate": 4.730263157894737e-05, |
|
"loss": 0.0228, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 4.728070175438597e-05, |
|
"loss": 0.022, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 61.25, |
|
"learning_rate": 4.7258771929824566e-05, |
|
"loss": 0.0235, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 4.7236842105263166e-05, |
|
"loss": 0.0232, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 61.75, |
|
"learning_rate": 4.721491228070176e-05, |
|
"loss": 0.0241, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 4.719298245614036e-05, |
|
"loss": 0.0213, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 62.25, |
|
"learning_rate": 4.717105263157895e-05, |
|
"loss": 0.022, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 4.714912280701755e-05, |
|
"loss": 0.0212, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 62.75, |
|
"learning_rate": 4.7127192982456144e-05, |
|
"loss": 0.0221, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 4.7105263157894744e-05, |
|
"loss": 0.0248, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 63.25, |
|
"learning_rate": 4.708333333333334e-05, |
|
"loss": 0.0244, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 4.706140350877194e-05, |
|
"loss": 0.0227, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 63.75, |
|
"learning_rate": 4.703947368421053e-05, |
|
"loss": 0.0238, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 4.701754385964913e-05, |
|
"loss": 0.0223, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 64.25, |
|
"learning_rate": 4.699561403508772e-05, |
|
"loss": 0.0224, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 4.697368421052632e-05, |
|
"loss": 0.0208, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 64.75, |
|
"learning_rate": 4.6951754385964915e-05, |
|
"loss": 0.0201, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 4.6929824561403515e-05, |
|
"loss": 0.0223, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_ELSE": 0.9958877574209746, |
|
"eval_accuracy_road": 0.9915703138349377, |
|
"eval_accuracy_sidewalk": 0.8920022528865108, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9908907863426399, |
|
"eval_iou_road": 0.9826294325990941, |
|
"eval_iou_sidewalk": 0.8281045751633986, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.024835307151079178, |
|
"eval_mean_accuracy": 0.9598201080474743, |
|
"eval_mean_iou": 0.9338749313683775, |
|
"eval_overall_accuracy": 0.9919166564941406, |
|
"eval_runtime": 0.6119, |
|
"eval_samples_per_second": 1.634, |
|
"eval_steps_per_second": 1.634, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 65.25, |
|
"learning_rate": 4.690789473684211e-05, |
|
"loss": 0.0239, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 4.688596491228071e-05, |
|
"loss": 0.0228, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 65.75, |
|
"learning_rate": 4.68640350877193e-05, |
|
"loss": 0.0219, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 4.68421052631579e-05, |
|
"loss": 0.0197, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 66.25, |
|
"learning_rate": 4.6820175438596494e-05, |
|
"loss": 0.0251, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 4.679824561403509e-05, |
|
"loss": 0.0219, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 66.75, |
|
"learning_rate": 4.6776315789473686e-05, |
|
"loss": 0.0187, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 4.6754385964912286e-05, |
|
"loss": 0.0204, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 67.25, |
|
"learning_rate": 4.673245614035088e-05, |
|
"loss": 0.0196, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 4.671052631578948e-05, |
|
"loss": 0.0199, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 67.75, |
|
"learning_rate": 4.668859649122807e-05, |
|
"loss": 0.0219, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 4.666666666666667e-05, |
|
"loss": 0.0209, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 68.25, |
|
"learning_rate": 4.6644736842105265e-05, |
|
"loss": 0.0223, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 4.6622807017543864e-05, |
|
"loss": 0.0192, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 68.75, |
|
"learning_rate": 4.660087719298246e-05, |
|
"loss": 0.0193, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 4.657894736842106e-05, |
|
"loss": 0.0207, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 69.25, |
|
"learning_rate": 4.655701754385965e-05, |
|
"loss": 0.0211, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 69.5, |
|
"learning_rate": 4.653508771929825e-05, |
|
"loss": 0.0209, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 69.75, |
|
"learning_rate": 4.651315789473684e-05, |
|
"loss": 0.0211, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 4.649122807017544e-05, |
|
"loss": 0.0206, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_ELSE": 0.9964775395803348, |
|
"eval_accuracy_road": 0.9925813066013556, |
|
"eval_accuracy_sidewalk": 0.9038299070684315, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9921025270835915, |
|
"eval_iou_road": 0.9847290501214895, |
|
"eval_iou_sidewalk": 0.8466103930361383, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.02149595506489277, |
|
"eval_mean_accuracy": 0.964296251083374, |
|
"eval_mean_iou": 0.9411473234137397, |
|
"eval_overall_accuracy": 0.9929237365722656, |
|
"eval_runtime": 0.6222, |
|
"eval_samples_per_second": 1.607, |
|
"eval_steps_per_second": 1.607, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 70.25, |
|
"learning_rate": 4.6469298245614036e-05, |
|
"loss": 0.0207, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 70.5, |
|
"learning_rate": 4.6447368421052635e-05, |
|
"loss": 0.0191, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 70.75, |
|
"learning_rate": 4.6425438596491235e-05, |
|
"loss": 0.0212, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 4.640350877192983e-05, |
|
"loss": 0.0197, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 71.25, |
|
"learning_rate": 4.638157894736843e-05, |
|
"loss": 0.0189, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 71.5, |
|
"learning_rate": 4.635964912280702e-05, |
|
"loss": 0.0192, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 71.75, |
|
"learning_rate": 4.633771929824562e-05, |
|
"loss": 0.0176, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 4.6315789473684214e-05, |
|
"loss": 0.0207, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 72.25, |
|
"learning_rate": 4.629385964912281e-05, |
|
"loss": 0.0209, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 4.6271929824561406e-05, |
|
"loss": 0.0198, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 72.75, |
|
"learning_rate": 4.6250000000000006e-05, |
|
"loss": 0.0196, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 4.62280701754386e-05, |
|
"loss": 0.0186, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 73.25, |
|
"learning_rate": 4.62061403508772e-05, |
|
"loss": 0.0191, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"learning_rate": 4.618421052631579e-05, |
|
"loss": 0.0196, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 73.75, |
|
"learning_rate": 4.616228070175439e-05, |
|
"loss": 0.0193, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 4.6140350877192985e-05, |
|
"loss": 0.0176, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 74.25, |
|
"learning_rate": 4.6118421052631584e-05, |
|
"loss": 0.0196, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 74.5, |
|
"learning_rate": 4.609649122807018e-05, |
|
"loss": 0.0184, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 74.75, |
|
"learning_rate": 4.607456140350878e-05, |
|
"loss": 0.0193, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 4.605263157894737e-05, |
|
"loss": 0.0194, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_accuracy_ELSE": 0.9953520837166016, |
|
"eval_accuracy_road": 0.9927236999487384, |
|
"eval_accuracy_sidewalk": 0.9125598422979443, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9915268397591726, |
|
"eval_iou_road": 0.9848564042435972, |
|
"eval_iou_sidewalk": 0.828878373193503, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.022591305896639824, |
|
"eval_mean_accuracy": 0.9668785419877614, |
|
"eval_mean_iou": 0.9350872057320908, |
|
"eval_overall_accuracy": 0.9924049377441406, |
|
"eval_runtime": 0.6107, |
|
"eval_samples_per_second": 1.637, |
|
"eval_steps_per_second": 1.637, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 75.25, |
|
"learning_rate": 4.603070175438597e-05, |
|
"loss": 0.0205, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 75.5, |
|
"learning_rate": 4.600877192982456e-05, |
|
"loss": 0.0206, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 75.75, |
|
"learning_rate": 4.598684210526316e-05, |
|
"loss": 0.0197, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 4.5964912280701756e-05, |
|
"loss": 0.0208, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 76.25, |
|
"learning_rate": 4.5942982456140355e-05, |
|
"loss": 0.019, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 76.5, |
|
"learning_rate": 4.592105263157895e-05, |
|
"loss": 0.0198, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 76.75, |
|
"learning_rate": 4.589912280701755e-05, |
|
"loss": 0.0196, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 4.587719298245614e-05, |
|
"loss": 0.0171, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 77.25, |
|
"learning_rate": 4.585526315789474e-05, |
|
"loss": 0.0189, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 4.5833333333333334e-05, |
|
"loss": 0.0186, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 77.75, |
|
"learning_rate": 4.5811403508771934e-05, |
|
"loss": 0.0184, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 4.5789473684210527e-05, |
|
"loss": 0.0154, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 78.25, |
|
"learning_rate": 4.5767543859649126e-05, |
|
"loss": 0.0177, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 78.5, |
|
"learning_rate": 4.574561403508772e-05, |
|
"loss": 0.0172, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 78.75, |
|
"learning_rate": 4.572368421052632e-05, |
|
"loss": 0.0201, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 4.570175438596491e-05, |
|
"loss": 0.0175, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 79.25, |
|
"learning_rate": 4.567982456140351e-05, |
|
"loss": 0.0167, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 79.5, |
|
"learning_rate": 4.5657894736842105e-05, |
|
"loss": 0.0182, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 79.75, |
|
"learning_rate": 4.5635964912280705e-05, |
|
"loss": 0.0164, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 4.56140350877193e-05, |
|
"loss": 0.02, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_ELSE": 0.9966560974817925, |
|
"eval_accuracy_road": 0.9917981431907501, |
|
"eval_accuracy_sidewalk": 0.8993241340467474, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9916873048347152, |
|
"eval_iou_road": 0.9854137487090248, |
|
"eval_iou_sidewalk": 0.8370904325032765, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.021616503596305847, |
|
"eval_mean_accuracy": 0.9625927915730967, |
|
"eval_mean_iou": 0.9380638286823388, |
|
"eval_overall_accuracy": 0.9927177429199219, |
|
"eval_runtime": 0.6134, |
|
"eval_samples_per_second": 1.63, |
|
"eval_steps_per_second": 1.63, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 80.25, |
|
"learning_rate": 4.55921052631579e-05, |
|
"loss": 0.0188, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 80.5, |
|
"learning_rate": 4.557017543859649e-05, |
|
"loss": 0.0165, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 80.75, |
|
"learning_rate": 4.554824561403509e-05, |
|
"loss": 0.0179, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 4.552631578947369e-05, |
|
"loss": 0.018, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 81.25, |
|
"learning_rate": 4.550438596491228e-05, |
|
"loss": 0.0163, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 81.5, |
|
"learning_rate": 4.548245614035088e-05, |
|
"loss": 0.0206, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 81.75, |
|
"learning_rate": 4.5460526315789476e-05, |
|
"loss": 0.0161, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 4.5438596491228075e-05, |
|
"loss": 0.0194, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 82.25, |
|
"learning_rate": 4.541666666666667e-05, |
|
"loss": 0.0181, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 4.539473684210527e-05, |
|
"loss": 0.0157, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 82.75, |
|
"learning_rate": 4.537280701754386e-05, |
|
"loss": 0.02, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 4.535087719298246e-05, |
|
"loss": 0.02, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 83.25, |
|
"learning_rate": 4.5328947368421054e-05, |
|
"loss": 0.0192, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 83.5, |
|
"learning_rate": 4.5307017543859654e-05, |
|
"loss": 0.0178, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 83.75, |
|
"learning_rate": 4.5285087719298247e-05, |
|
"loss": 0.0177, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 4.5263157894736846e-05, |
|
"loss": 0.0169, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 84.25, |
|
"learning_rate": 4.524122807017544e-05, |
|
"loss": 0.0183, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 84.5, |
|
"learning_rate": 4.521929824561404e-05, |
|
"loss": 0.0187, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 84.75, |
|
"learning_rate": 4.519736842105263e-05, |
|
"loss": 0.0157, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 4.517543859649123e-05, |
|
"loss": 0.0181, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_accuracy_ELSE": 0.9960879587044271, |
|
"eval_accuracy_road": 0.9906874750811642, |
|
"eval_accuracy_sidewalk": 0.9091805125316812, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9915063446582071, |
|
"eval_iou_road": 0.9846166909610676, |
|
"eval_iou_sidewalk": 0.8239122113053464, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.022249778732657433, |
|
"eval_mean_accuracy": 0.9653186487724241, |
|
"eval_mean_iou": 0.933345082308207, |
|
"eval_overall_accuracy": 0.9922866821289062, |
|
"eval_runtime": 0.6261, |
|
"eval_samples_per_second": 1.597, |
|
"eval_steps_per_second": 1.597, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 85.25, |
|
"learning_rate": 4.5153508771929825e-05, |
|
"loss": 0.0168, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 85.5, |
|
"learning_rate": 4.5131578947368425e-05, |
|
"loss": 0.019, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 85.75, |
|
"learning_rate": 4.510964912280702e-05, |
|
"loss": 0.0154, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 4.508771929824562e-05, |
|
"loss": 0.0172, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 86.25, |
|
"learning_rate": 4.506578947368421e-05, |
|
"loss": 0.0174, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"learning_rate": 4.504385964912281e-05, |
|
"loss": 0.0172, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 86.75, |
|
"learning_rate": 4.50219298245614e-05, |
|
"loss": 0.0158, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 4.5e-05, |
|
"loss": 0.0185, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 87.25, |
|
"learning_rate": 4.4978070175438596e-05, |
|
"loss": 0.0167, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 4.4956140350877196e-05, |
|
"loss": 0.0164, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 87.75, |
|
"learning_rate": 4.493421052631579e-05, |
|
"loss": 0.0172, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 4.491228070175439e-05, |
|
"loss": 0.0177, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 88.25, |
|
"learning_rate": 4.489035087719298e-05, |
|
"loss": 0.0163, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 88.5, |
|
"learning_rate": 4.486842105263158e-05, |
|
"loss": 0.0171, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 88.75, |
|
"learning_rate": 4.4846491228070174e-05, |
|
"loss": 0.0163, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 4.4824561403508774e-05, |
|
"loss": 0.0174, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 89.25, |
|
"learning_rate": 4.480263157894737e-05, |
|
"loss": 0.017, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 4.4780701754385967e-05, |
|
"loss": 0.0153, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 89.75, |
|
"learning_rate": 4.475877192982456e-05, |
|
"loss": 0.0155, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 4.473684210526316e-05, |
|
"loss": 0.018, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_ELSE": 0.9963801443613579, |
|
"eval_accuracy_road": 0.9914136811528166, |
|
"eval_accuracy_sidewalk": 0.8993241340467474, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9916369589333218, |
|
"eval_iou_road": 0.9848366974553375, |
|
"eval_iou_sidewalk": 0.8256204756980352, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.022521808743476868, |
|
"eval_mean_accuracy": 0.962372653186974, |
|
"eval_mean_iou": 0.9340313773622316, |
|
"eval_overall_accuracy": 0.9924201965332031, |
|
"eval_runtime": 0.6094, |
|
"eval_samples_per_second": 1.641, |
|
"eval_steps_per_second": 1.641, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 90.25, |
|
"learning_rate": 4.471491228070175e-05, |
|
"loss": 0.0152, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 90.5, |
|
"learning_rate": 4.469298245614035e-05, |
|
"loss": 0.0175, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 90.75, |
|
"learning_rate": 4.4671052631578945e-05, |
|
"loss": 0.0161, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 4.4649122807017545e-05, |
|
"loss": 0.0171, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 91.25, |
|
"learning_rate": 4.4627192982456145e-05, |
|
"loss": 0.017, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 91.5, |
|
"learning_rate": 4.460526315789474e-05, |
|
"loss": 0.0149, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 91.75, |
|
"learning_rate": 4.458333333333334e-05, |
|
"loss": 0.0158, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 4.456140350877193e-05, |
|
"loss": 0.0176, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 92.25, |
|
"learning_rate": 4.453947368421053e-05, |
|
"loss": 0.0168, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 4.451754385964912e-05, |
|
"loss": 0.0159, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 92.75, |
|
"learning_rate": 4.449561403508772e-05, |
|
"loss": 0.018, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 4.4473684210526316e-05, |
|
"loss": 0.0157, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 93.25, |
|
"learning_rate": 4.4451754385964916e-05, |
|
"loss": 0.0154, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"learning_rate": 4.442982456140351e-05, |
|
"loss": 0.0161, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 93.75, |
|
"learning_rate": 4.440789473684211e-05, |
|
"loss": 0.0163, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 4.43859649122807e-05, |
|
"loss": 0.018, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 94.25, |
|
"learning_rate": 4.43640350877193e-05, |
|
"loss": 0.0165, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 94.5, |
|
"learning_rate": 4.4342105263157894e-05, |
|
"loss": 0.0157, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 94.75, |
|
"learning_rate": 4.4320175438596494e-05, |
|
"loss": 0.0159, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 4.429824561403509e-05, |
|
"loss": 0.0169, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_accuracy_ELSE": 0.9958120055839925, |
|
"eval_accuracy_road": 0.9931366406561486, |
|
"eval_accuracy_sidewalk": 0.9098845395663193, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9922096126371405, |
|
"eval_iou_road": 0.9854609678558813, |
|
"eval_iou_sidewalk": 0.8305912596401028, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.020703531801700592, |
|
"eval_mean_accuracy": 0.9662777286021536, |
|
"eval_mean_iou": 0.9360872800443749, |
|
"eval_overall_accuracy": 0.992767333984375, |
|
"eval_runtime": 0.6103, |
|
"eval_samples_per_second": 1.638, |
|
"eval_steps_per_second": 1.638, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 95.25, |
|
"learning_rate": 4.4276315789473687e-05, |
|
"loss": 0.0169, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 95.5, |
|
"learning_rate": 4.425438596491228e-05, |
|
"loss": 0.0148, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 95.75, |
|
"learning_rate": 4.423245614035088e-05, |
|
"loss": 0.0165, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 4.421052631578947e-05, |
|
"loss": 0.018, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 96.25, |
|
"learning_rate": 4.418859649122807e-05, |
|
"loss": 0.0165, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 96.5, |
|
"learning_rate": 4.4166666666666665e-05, |
|
"loss": 0.0156, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 96.75, |
|
"learning_rate": 4.4144736842105265e-05, |
|
"loss": 0.0147, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 4.412280701754386e-05, |
|
"loss": 0.0176, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 97.25, |
|
"learning_rate": 4.410087719298246e-05, |
|
"loss": 0.0153, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 4.407894736842105e-05, |
|
"loss": 0.0159, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 97.75, |
|
"learning_rate": 4.405701754385965e-05, |
|
"loss": 0.0159, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 4.403508771929824e-05, |
|
"loss": 0.0154, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 98.25, |
|
"learning_rate": 4.401315789473684e-05, |
|
"loss": 0.0141, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 98.5, |
|
"learning_rate": 4.3991228070175436e-05, |
|
"loss": 0.0151, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 98.75, |
|
"learning_rate": 4.3969298245614036e-05, |
|
"loss": 0.0167, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 4.394736842105263e-05, |
|
"loss": 0.0178, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 99.25, |
|
"learning_rate": 4.392543859649123e-05, |
|
"loss": 0.0161, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 99.5, |
|
"learning_rate": 4.390350877192982e-05, |
|
"loss": 0.015, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 99.75, |
|
"learning_rate": 4.388157894736842e-05, |
|
"loss": 0.0146, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 4.3859649122807014e-05, |
|
"loss": 0.0157, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_ELSE": 0.995974330948954, |
|
"eval_accuracy_road": 0.99229651990659, |
|
"eval_accuracy_sidewalk": 0.9091805125316812, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9920878741821082, |
|
"eval_iou_road": 0.9857555096613574, |
|
"eval_iou_sidewalk": 0.8233868910992094, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.020867783576250076, |
|
"eval_mean_accuracy": 0.965817121129075, |
|
"eval_mean_iou": 0.9337434249808917, |
|
"eval_overall_accuracy": 0.9926376342773438, |
|
"eval_runtime": 0.6759, |
|
"eval_samples_per_second": 1.479, |
|
"eval_steps_per_second": 1.479, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 100.25, |
|
"learning_rate": 4.3837719298245614e-05, |
|
"loss": 0.0166, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 100.5, |
|
"learning_rate": 4.381578947368421e-05, |
|
"loss": 0.0153, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 100.75, |
|
"learning_rate": 4.379385964912281e-05, |
|
"loss": 0.0143, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 101.0, |
|
"learning_rate": 4.37719298245614e-05, |
|
"loss": 0.0163, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 101.25, |
|
"learning_rate": 4.375e-05, |
|
"loss": 0.0148, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 101.5, |
|
"learning_rate": 4.37280701754386e-05, |
|
"loss": 0.0151, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 101.75, |
|
"learning_rate": 4.370614035087719e-05, |
|
"loss": 0.0147, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 102.0, |
|
"learning_rate": 4.368421052631579e-05, |
|
"loss": 0.0173, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 102.25, |
|
"learning_rate": 4.3662280701754385e-05, |
|
"loss": 0.0156, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 102.5, |
|
"learning_rate": 4.3640350877192985e-05, |
|
"loss": 0.0143, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 102.75, |
|
"learning_rate": 4.361842105263158e-05, |
|
"loss": 0.0167, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 103.0, |
|
"learning_rate": 4.359649122807018e-05, |
|
"loss": 0.0158, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 103.25, |
|
"learning_rate": 4.357456140350877e-05, |
|
"loss": 0.014, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 103.5, |
|
"learning_rate": 4.355263157894737e-05, |
|
"loss": 0.0139, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 103.75, |
|
"learning_rate": 4.353070175438596e-05, |
|
"loss": 0.0141, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 104.0, |
|
"learning_rate": 4.350877192982456e-05, |
|
"loss": 0.0174, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 104.25, |
|
"learning_rate": 4.3486842105263156e-05, |
|
"loss": 0.0158, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 104.5, |
|
"learning_rate": 4.3464912280701756e-05, |
|
"loss": 0.0156, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 104.75, |
|
"learning_rate": 4.344298245614035e-05, |
|
"loss": 0.0162, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"learning_rate": 4.342105263157895e-05, |
|
"loss": 0.0147, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"eval_accuracy_ELSE": 0.9954440680900798, |
|
"eval_accuracy_road": 0.9913994418180783, |
|
"eval_accuracy_sidewalk": 0.9119966206702337, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9918483534967976, |
|
"eval_iou_road": 0.9835704295986551, |
|
"eval_iou_sidewalk": 0.8153323262839879, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.022910170257091522, |
|
"eval_mean_accuracy": 0.9662800435261305, |
|
"eval_mean_iou": 0.9302503697931469, |
|
"eval_overall_accuracy": 0.9920997619628906, |
|
"eval_runtime": 0.6023, |
|
"eval_samples_per_second": 1.66, |
|
"eval_steps_per_second": 1.66, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 105.25, |
|
"learning_rate": 4.339912280701754e-05, |
|
"loss": 0.015, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 105.5, |
|
"learning_rate": 4.337719298245614e-05, |
|
"loss": 0.0146, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 105.75, |
|
"learning_rate": 4.335526315789474e-05, |
|
"loss": 0.0162, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 106.0, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"loss": 0.0146, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 106.25, |
|
"learning_rate": 4.3311403508771934e-05, |
|
"loss": 0.0141, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 106.5, |
|
"learning_rate": 4.328947368421053e-05, |
|
"loss": 0.0158, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 106.75, |
|
"learning_rate": 4.3267543859649127e-05, |
|
"loss": 0.0166, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 107.0, |
|
"learning_rate": 4.324561403508772e-05, |
|
"loss": 0.0145, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 107.25, |
|
"learning_rate": 4.322368421052632e-05, |
|
"loss": 0.0151, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 107.5, |
|
"learning_rate": 4.320175438596491e-05, |
|
"loss": 0.0148, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 107.75, |
|
"learning_rate": 4.317982456140351e-05, |
|
"loss": 0.0137, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 108.0, |
|
"learning_rate": 4.3157894736842105e-05, |
|
"loss": 0.0154, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 108.25, |
|
"learning_rate": 4.3135964912280705e-05, |
|
"loss": 0.0143, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 108.5, |
|
"learning_rate": 4.31140350877193e-05, |
|
"loss": 0.0142, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 108.75, |
|
"learning_rate": 4.30921052631579e-05, |
|
"loss": 0.0152, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 109.0, |
|
"learning_rate": 4.30701754385965e-05, |
|
"loss": 0.017, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 109.25, |
|
"learning_rate": 4.304824561403509e-05, |
|
"loss": 0.017, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 109.5, |
|
"learning_rate": 4.302631578947369e-05, |
|
"loss": 0.0139, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 109.75, |
|
"learning_rate": 4.300438596491228e-05, |
|
"loss": 0.0152, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"learning_rate": 4.298245614035088e-05, |
|
"loss": 0.014, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"eval_accuracy_ELSE": 0.9961961756144015, |
|
"eval_accuracy_road": 0.9930227259782423, |
|
"eval_accuracy_sidewalk": 0.9081948746831878, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_ELSE": 0.9925602859437924, |
|
"eval_iou_road": 0.9856543185447967, |
|
"eval_iou_sidewalk": 0.8327953518398967, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.02052408643066883, |
|
"eval_mean_accuracy": 0.9658045920919438, |
|
"eval_mean_iou": 0.9370033187761618, |
|
"eval_overall_accuracy": 0.9929618835449219, |
|
"eval_runtime": 0.6167, |
|
"eval_samples_per_second": 1.622, |
|
"eval_steps_per_second": 1.622, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 110.25, |
|
"learning_rate": 4.2960526315789476e-05, |
|
"loss": 0.0149, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 110.5, |
|
"learning_rate": 4.2938596491228076e-05, |
|
"loss": 0.0133, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 110.75, |
|
"learning_rate": 4.291666666666667e-05, |
|
"loss": 0.0157, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 111.0, |
|
"learning_rate": 4.289473684210527e-05, |
|
"loss": 0.0138, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 111.25, |
|
"learning_rate": 4.287280701754386e-05, |
|
"loss": 0.0134, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 111.5, |
|
"learning_rate": 4.285087719298246e-05, |
|
"loss": 0.0144, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 111.75, |
|
"learning_rate": 4.2828947368421054e-05, |
|
"loss": 0.0153, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 112.0, |
|
"learning_rate": 4.2807017543859654e-05, |
|
"loss": 0.0143, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 112.25, |
|
"learning_rate": 4.2785087719298254e-05, |
|
"loss": 0.0158, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 112.5, |
|
"learning_rate": 4.2763157894736847e-05, |
|
"loss": 0.0139, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 112.75, |
|
"learning_rate": 4.2741228070175446e-05, |
|
"loss": 0.0144, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 113.0, |
|
"learning_rate": 4.271929824561404e-05, |
|
"loss": 0.0133, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 113.25, |
|
"learning_rate": 4.269736842105264e-05, |
|
"loss": 0.0139, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 113.5, |
|
"learning_rate": 4.267543859649123e-05, |
|
"loss": 0.0135, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 113.75, |
|
"learning_rate": 4.265350877192983e-05, |
|
"loss": 0.0148, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 114.0, |
|
"learning_rate": 4.2631578947368425e-05, |
|
"loss": 0.0143, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 114.25, |
|
"learning_rate": 4.2609649122807025e-05, |
|
"loss": 0.0157, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 114.5, |
|
"learning_rate": 4.258771929824562e-05, |
|
"loss": 0.0139, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 114.75, |
|
"learning_rate": 4.256578947368422e-05, |
|
"loss": 0.0142, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"learning_rate": 4.254385964912281e-05, |
|
"loss": 0.013, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"eval_accuracy_ELSE": 0.9967589035462682, |
|
"eval_accuracy_road": 0.992752178618215, |
|
"eval_accuracy_sidewalk": 0.8943959448042805, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_ELSE": 0.9922223000231607, |
|
"eval_iou_road": 0.9856922707158106, |
|
"eval_iou_sidewalk": 0.8344718864950079, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.021300747990608215, |
|
"eval_mean_accuracy": 0.9613023423229213, |
|
"eval_mean_iou": 0.9374621524113264, |
|
"eval_overall_accuracy": 0.9929122924804688, |
|
"eval_runtime": 0.6042, |
|
"eval_samples_per_second": 1.655, |
|
"eval_steps_per_second": 1.655, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 115.25, |
|
"learning_rate": 4.252192982456141e-05, |
|
"loss": 0.014, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 115.5, |
|
"learning_rate": 4.25e-05, |
|
"loss": 0.0127, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 115.75, |
|
"learning_rate": 4.24780701754386e-05, |
|
"loss": 0.0142, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 116.0, |
|
"learning_rate": 4.2456140350877196e-05, |
|
"loss": 0.0149, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 116.25, |
|
"learning_rate": 4.2434210526315796e-05, |
|
"loss": 0.0149, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 116.5, |
|
"learning_rate": 4.241228070175439e-05, |
|
"loss": 0.0135, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 116.75, |
|
"learning_rate": 4.239035087719299e-05, |
|
"loss": 0.0149, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 117.0, |
|
"learning_rate": 4.236842105263158e-05, |
|
"loss": 0.0137, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 117.25, |
|
"learning_rate": 4.234649122807018e-05, |
|
"loss": 0.0128, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 117.5, |
|
"learning_rate": 4.2324561403508774e-05, |
|
"loss": 0.0145, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 117.75, |
|
"learning_rate": 4.2302631578947374e-05, |
|
"loss": 0.0137, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 118.0, |
|
"learning_rate": 4.228070175438597e-05, |
|
"loss": 0.0168, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 118.25, |
|
"learning_rate": 4.2258771929824567e-05, |
|
"loss": 0.0135, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 118.5, |
|
"learning_rate": 4.223684210526316e-05, |
|
"loss": 0.0141, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 118.75, |
|
"learning_rate": 4.221491228070176e-05, |
|
"loss": 0.0139, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 119.0, |
|
"learning_rate": 4.219298245614035e-05, |
|
"loss": 0.0136, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 119.25, |
|
"learning_rate": 4.217105263157895e-05, |
|
"loss": 0.0132, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 119.5, |
|
"learning_rate": 4.2149122807017545e-05, |
|
"loss": 0.0151, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 119.75, |
|
"learning_rate": 4.2127192982456145e-05, |
|
"loss": 0.0154, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"learning_rate": 4.210526315789474e-05, |
|
"loss": 0.0146, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"eval_accuracy_ELSE": 0.9978789485645027, |
|
"eval_accuracy_road": 0.9921683658939454, |
|
"eval_accuracy_sidewalk": 0.8914390312588003, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_ELSE": 0.9929949440833931, |
|
"eval_iou_road": 0.9865771812080537, |
|
"eval_iou_sidewalk": 0.843233883857219, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.020083561539649963, |
|
"eval_mean_accuracy": 0.9604954485724161, |
|
"eval_mean_iou": 0.9409353363828886, |
|
"eval_overall_accuracy": 0.9934654235839844, |
|
"eval_runtime": 0.5941, |
|
"eval_samples_per_second": 1.683, |
|
"eval_steps_per_second": 1.683, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 120.25, |
|
"learning_rate": 4.208333333333334e-05, |
|
"loss": 0.0128, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 120.5, |
|
"learning_rate": 4.206140350877193e-05, |
|
"loss": 0.015, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 120.75, |
|
"learning_rate": 4.203947368421053e-05, |
|
"loss": 0.0138, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 121.0, |
|
"learning_rate": 4.201754385964912e-05, |
|
"loss": 0.0142, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 121.25, |
|
"learning_rate": 4.199561403508772e-05, |
|
"loss": 0.014, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 121.5, |
|
"learning_rate": 4.1973684210526316e-05, |
|
"loss": 0.0135, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 121.75, |
|
"learning_rate": 4.1951754385964916e-05, |
|
"loss": 0.016, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 122.0, |
|
"learning_rate": 4.1929824561403516e-05, |
|
"loss": 0.013, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 122.25, |
|
"learning_rate": 4.190789473684211e-05, |
|
"loss": 0.0137, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 122.5, |
|
"learning_rate": 4.188596491228071e-05, |
|
"loss": 0.0131, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 122.75, |
|
"learning_rate": 4.18640350877193e-05, |
|
"loss": 0.0132, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 123.0, |
|
"learning_rate": 4.18421052631579e-05, |
|
"loss": 0.0147, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 123.25, |
|
"learning_rate": 4.1820175438596494e-05, |
|
"loss": 0.0142, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 123.5, |
|
"learning_rate": 4.1798245614035094e-05, |
|
"loss": 0.015, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 123.75, |
|
"learning_rate": 4.177631578947369e-05, |
|
"loss": 0.0138, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 124.0, |
|
"learning_rate": 4.1754385964912287e-05, |
|
"loss": 0.0139, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 124.25, |
|
"learning_rate": 4.173245614035088e-05, |
|
"loss": 0.0134, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 124.5, |
|
"learning_rate": 4.171052631578948e-05, |
|
"loss": 0.0136, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 124.75, |
|
"learning_rate": 4.168859649122807e-05, |
|
"loss": 0.0148, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 125.0, |
|
"learning_rate": 4.166666666666667e-05, |
|
"loss": 0.0141, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 125.0, |
|
"eval_accuracy_ELSE": 0.9958769357299772, |
|
"eval_accuracy_road": 0.993720453380418, |
|
"eval_accuracy_sidewalk": 0.9110109828217403, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_ELSE": 0.9926007420830097, |
|
"eval_iou_road": 0.9864444632912108, |
|
"eval_iou_sidewalk": 0.8285311819695224, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.020598702132701874, |
|
"eval_mean_accuracy": 0.9668694573107118, |
|
"eval_mean_iou": 0.9358587957812476, |
|
"eval_overall_accuracy": 0.9930000305175781, |
|
"eval_runtime": 0.6022, |
|
"eval_samples_per_second": 1.661, |
|
"eval_steps_per_second": 1.661, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 125.25, |
|
"learning_rate": 4.1644736842105265e-05, |
|
"loss": 0.0126, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 125.5, |
|
"learning_rate": 4.1622807017543865e-05, |
|
"loss": 0.0142, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 125.75, |
|
"learning_rate": 4.160087719298246e-05, |
|
"loss": 0.0137, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 126.0, |
|
"learning_rate": 4.157894736842106e-05, |
|
"loss": 0.0144, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 126.25, |
|
"learning_rate": 4.155701754385965e-05, |
|
"loss": 0.0152, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 126.5, |
|
"learning_rate": 4.153508771929825e-05, |
|
"loss": 0.0118, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 126.75, |
|
"learning_rate": 4.151315789473684e-05, |
|
"loss": 0.0143, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 127.0, |
|
"learning_rate": 4.149122807017544e-05, |
|
"loss": 0.0132, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 127.25, |
|
"learning_rate": 4.1469298245614036e-05, |
|
"loss": 0.0139, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 127.5, |
|
"learning_rate": 4.1447368421052636e-05, |
|
"loss": 0.0132, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 127.75, |
|
"learning_rate": 4.142543859649123e-05, |
|
"loss": 0.0117, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 128.0, |
|
"learning_rate": 4.140350877192983e-05, |
|
"loss": 0.0141, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 128.25, |
|
"learning_rate": 4.138157894736842e-05, |
|
"loss": 0.0126, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 128.5, |
|
"learning_rate": 4.135964912280702e-05, |
|
"loss": 0.0155, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 128.75, |
|
"learning_rate": 4.1337719298245614e-05, |
|
"loss": 0.0143, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 129.0, |
|
"learning_rate": 4.1315789473684214e-05, |
|
"loss": 0.0122, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 129.25, |
|
"learning_rate": 4.129385964912281e-05, |
|
"loss": 0.0137, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 129.5, |
|
"learning_rate": 4.127192982456141e-05, |
|
"loss": 0.0126, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 129.75, |
|
"learning_rate": 4.125e-05, |
|
"loss": 0.013, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 130.0, |
|
"learning_rate": 4.12280701754386e-05, |
|
"loss": 0.0146, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 130.0, |
|
"eval_accuracy_ELSE": 0.9963260359063707, |
|
"eval_accuracy_road": 0.994176112092043, |
|
"eval_accuracy_sidewalk": 0.898338496198254, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_ELSE": 0.9925451977705667, |
|
"eval_iou_road": 0.9862555090970732, |
|
"eval_iou_sidewalk": 0.8346415489272632, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.020625747740268707, |
|
"eval_mean_accuracy": 0.9629468813988892, |
|
"eval_mean_iou": 0.9378140852649678, |
|
"eval_overall_accuracy": 0.9930953979492188, |
|
"eval_runtime": 0.6069, |
|
"eval_samples_per_second": 1.648, |
|
"eval_steps_per_second": 1.648, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 130.25, |
|
"learning_rate": 4.120614035087719e-05, |
|
"loss": 0.0121, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 130.5, |
|
"learning_rate": 4.118421052631579e-05, |
|
"loss": 0.0127, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 130.75, |
|
"learning_rate": 4.1162280701754385e-05, |
|
"loss": 0.0131, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 131.0, |
|
"learning_rate": 4.1140350877192985e-05, |
|
"loss": 0.0149, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 131.25, |
|
"learning_rate": 4.111842105263158e-05, |
|
"loss": 0.0128, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 131.5, |
|
"learning_rate": 4.109649122807018e-05, |
|
"loss": 0.013, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 131.75, |
|
"learning_rate": 4.107456140350877e-05, |
|
"loss": 0.0135, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 132.0, |
|
"learning_rate": 4.105263157894737e-05, |
|
"loss": 0.0125, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 132.25, |
|
"learning_rate": 4.103070175438597e-05, |
|
"loss": 0.013, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 132.5, |
|
"learning_rate": 4.100877192982456e-05, |
|
"loss": 0.0131, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 132.75, |
|
"learning_rate": 4.098684210526316e-05, |
|
"loss": 0.0134, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 133.0, |
|
"learning_rate": 4.0964912280701756e-05, |
|
"loss": 0.015, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 133.25, |
|
"learning_rate": 4.0942982456140356e-05, |
|
"loss": 0.0124, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 133.5, |
|
"learning_rate": 4.092105263157895e-05, |
|
"loss": 0.0141, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 133.75, |
|
"learning_rate": 4.089912280701755e-05, |
|
"loss": 0.0128, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 134.0, |
|
"learning_rate": 4.087719298245614e-05, |
|
"loss": 0.0141, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 134.25, |
|
"learning_rate": 4.085526315789474e-05, |
|
"loss": 0.0135, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 134.5, |
|
"learning_rate": 4.0833333333333334e-05, |
|
"loss": 0.0139, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 134.75, |
|
"learning_rate": 4.0811403508771934e-05, |
|
"loss": 0.0124, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 135.0, |
|
"learning_rate": 4.078947368421053e-05, |
|
"loss": 0.0132, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 135.0, |
|
"eval_accuracy_ELSE": 0.9962881599878797, |
|
"eval_accuracy_road": 0.9947456854815743, |
|
"eval_accuracy_sidewalk": 0.8931286961419318, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_ELSE": 0.9926465831410519, |
|
"eval_iou_road": 0.986792666045145, |
|
"eval_iou_sidewalk": 0.8267726798748697, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.020599104464054108, |
|
"eval_mean_accuracy": 0.961387513870462, |
|
"eval_mean_iou": 0.9354039763536889, |
|
"eval_overall_accuracy": 0.9930801391601562, |
|
"eval_runtime": 0.5792, |
|
"eval_samples_per_second": 1.726, |
|
"eval_steps_per_second": 1.726, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 135.25, |
|
"learning_rate": 4.076754385964913e-05, |
|
"loss": 0.0135, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 135.5, |
|
"learning_rate": 4.074561403508772e-05, |
|
"loss": 0.0129, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 135.75, |
|
"learning_rate": 4.072368421052632e-05, |
|
"loss": 0.0121, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 136.0, |
|
"learning_rate": 4.070175438596491e-05, |
|
"loss": 0.0135, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 136.25, |
|
"learning_rate": 4.067982456140351e-05, |
|
"loss": 0.0136, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 136.5, |
|
"learning_rate": 4.0657894736842105e-05, |
|
"loss": 0.0126, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 136.75, |
|
"learning_rate": 4.0635964912280705e-05, |
|
"loss": 0.0139, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 137.0, |
|
"learning_rate": 4.06140350877193e-05, |
|
"loss": 0.0146, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 137.25, |
|
"learning_rate": 4.05921052631579e-05, |
|
"loss": 0.0133, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 137.5, |
|
"learning_rate": 4.057017543859649e-05, |
|
"loss": 0.012, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 137.75, |
|
"learning_rate": 4.054824561403509e-05, |
|
"loss": 0.0128, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 138.0, |
|
"learning_rate": 4.0526315789473684e-05, |
|
"loss": 0.0128, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 138.25, |
|
"learning_rate": 4.050438596491228e-05, |
|
"loss": 0.0117, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 138.5, |
|
"learning_rate": 4.0482456140350876e-05, |
|
"loss": 0.0134, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 138.75, |
|
"learning_rate": 4.0460526315789476e-05, |
|
"loss": 0.0121, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 139.0, |
|
"learning_rate": 4.043859649122807e-05, |
|
"loss": 0.0125, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 139.25, |
|
"learning_rate": 4.041666666666667e-05, |
|
"loss": 0.0137, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 139.5, |
|
"learning_rate": 4.039473684210526e-05, |
|
"loss": 0.0126, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 139.75, |
|
"learning_rate": 4.037280701754386e-05, |
|
"loss": 0.0118, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 140.0, |
|
"learning_rate": 4.0350877192982455e-05, |
|
"loss": 0.0116, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 140.0, |
|
"eval_accuracy_ELSE": 0.9966236324088003, |
|
"eval_accuracy_road": 0.9937489320498947, |
|
"eval_accuracy_sidewalk": 0.9062235989862011, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_ELSE": 0.9931253875394279, |
|
"eval_iou_road": 0.9870028851049386, |
|
"eval_iou_sidewalk": 0.8358441558441558, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.01912616565823555, |
|
"eval_mean_accuracy": 0.965532054481632, |
|
"eval_mean_iou": 0.9386574761628408, |
|
"eval_overall_accuracy": 0.9934043884277344, |
|
"eval_runtime": 0.6102, |
|
"eval_samples_per_second": 1.639, |
|
"eval_steps_per_second": 1.639, |
|
"step": 560 |
|
} |
|
], |
|
"max_steps": 2400, |
|
"num_train_epochs": 600, |
|
"total_flos": 3.520732237450445e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|