|
{ |
|
"best_metric": 0.4244542717933655, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b5_3/checkpoint-200", |
|
"epoch": 100.0, |
|
"global_step": 200, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 4.1666666666666667e-07, |
|
"loss": 1.3484, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 8.333333333333333e-07, |
|
"loss": 1.3481, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 1.25e-06, |
|
"loss": 1.3531, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 1.6666666666666667e-06, |
|
"loss": 1.3399, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 2.0833333333333334e-06, |
|
"loss": 1.3398, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 2.5e-06, |
|
"loss": 1.3386, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 2.916666666666667e-06, |
|
"loss": 1.3376, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 1.3249, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 3.7500000000000005e-06, |
|
"loss": 1.3205, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 4.166666666666667e-06, |
|
"loss": 1.3135, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_dropoff": 0.4068717172412211, |
|
"eval_accuracy_undropoff": 0.11034322178517518, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.053473330857492875, |
|
"eval_iou_undropoff": 0.1102453041626867, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.2007640600204468, |
|
"eval_mean_accuracy": 0.25860746951319813, |
|
"eval_mean_iou": 0.05457287834005986, |
|
"eval_overall_accuracy": 0.12267265319824219, |
|
"eval_runtime": 2.5511, |
|
"eval_samples_per_second": 7.84, |
|
"eval_steps_per_second": 0.784, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 4.583333333333333e-06, |
|
"loss": 1.3089, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 5e-06, |
|
"loss": 1.3043, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 4.978070175438597e-06, |
|
"loss": 1.2871, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 4.956140350877193e-06, |
|
"loss": 1.2832, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 4.9342105263157895e-06, |
|
"loss": 1.2709, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 4.912280701754386e-06, |
|
"loss": 1.2634, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 4.890350877192983e-06, |
|
"loss": 1.257, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 4.8684210526315795e-06, |
|
"loss": 1.2403, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 4.846491228070176e-06, |
|
"loss": 1.2398, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 4.824561403508772e-06, |
|
"loss": 1.2309, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.43877153145714354, |
|
"eval_accuracy_undropoff": 0.24072332003617994, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.11288106949650205, |
|
"eval_iou_undropoff": 0.24004849364289937, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.1293678283691406, |
|
"eval_mean_accuracy": 0.3397474257466617, |
|
"eval_mean_iou": 0.11764318771313381, |
|
"eval_overall_accuracy": 0.24895801544189453, |
|
"eval_runtime": 2.6298, |
|
"eval_samples_per_second": 7.605, |
|
"eval_steps_per_second": 0.761, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 4.802631578947369e-06, |
|
"loss": 1.2159, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 4.780701754385965e-06, |
|
"loss": 1.2243, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 4.758771929824561e-06, |
|
"loss": 1.2094, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 4.736842105263158e-06, |
|
"loss": 1.1904, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 4.714912280701755e-06, |
|
"loss": 1.1865, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 4.692982456140351e-06, |
|
"loss": 1.1715, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 4.671052631578948e-06, |
|
"loss": 1.1722, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 4.649122807017544e-06, |
|
"loss": 1.1511, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 4.6271929824561405e-06, |
|
"loss": 1.1512, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 4.605263157894737e-06, |
|
"loss": 1.1346, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_dropoff": 0.46940067432739285, |
|
"eval_accuracy_undropoff": 0.5035900324087019, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.15243042262584855, |
|
"eval_iou_undropoff": 0.4989255427443525, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.039512276649475, |
|
"eval_mean_accuracy": 0.4864953533680474, |
|
"eval_mean_iou": 0.21711865512340034, |
|
"eval_overall_accuracy": 0.5021684646606446, |
|
"eval_runtime": 2.9463, |
|
"eval_samples_per_second": 6.788, |
|
"eval_steps_per_second": 0.679, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 4.583333333333333e-06, |
|
"loss": 1.1343, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 4.56140350877193e-06, |
|
"loss": 1.1182, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 4.539473684210527e-06, |
|
"loss": 1.132, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 4.517543859649123e-06, |
|
"loss": 1.0986, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 4.4956140350877196e-06, |
|
"loss": 1.0887, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 4.473684210526316e-06, |
|
"loss": 1.1214, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 4.451754385964912e-06, |
|
"loss": 1.0945, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 4.429824561403509e-06, |
|
"loss": 1.076, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 4.407894736842105e-06, |
|
"loss": 1.0444, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 4.385964912280702e-06, |
|
"loss": 1.1088, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.48076332025963897, |
|
"eval_accuracy_undropoff": 0.6235115828521449, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.1660792839847365, |
|
"eval_iou_undropoff": 0.616259059512146, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9755474328994751, |
|
"eval_mean_accuracy": 0.5521374515558919, |
|
"eval_mean_iou": 0.2607794478322942, |
|
"eval_overall_accuracy": 0.6175762176513672, |
|
"eval_runtime": 3.0937, |
|
"eval_samples_per_second": 6.465, |
|
"eval_steps_per_second": 0.646, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 4.364035087719299e-06, |
|
"loss": 1.0507, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 4.342105263157895e-06, |
|
"loss": 1.0644, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 4.320175438596491e-06, |
|
"loss": 1.0289, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 4.298245614035088e-06, |
|
"loss": 1.0551, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 4.276315789473684e-06, |
|
"loss": 1.0025, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 4.254385964912281e-06, |
|
"loss": 1.0486, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 4.232456140350878e-06, |
|
"loss": 1.019, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 4.210526315789474e-06, |
|
"loss": 0.9953, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 4.1885964912280705e-06, |
|
"loss": 0.9797, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 4.166666666666667e-06, |
|
"loss": 1.007, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_dropoff": 0.506777678387119, |
|
"eval_accuracy_undropoff": 0.6849468196784603, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.19228861306779316, |
|
"eval_iou_undropoff": 0.6762847927344823, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.9196955561637878, |
|
"eval_mean_accuracy": 0.5958622490327896, |
|
"eval_mean_iou": 0.2895244686007585, |
|
"eval_overall_accuracy": 0.6775386810302735, |
|
"eval_runtime": 2.8681, |
|
"eval_samples_per_second": 6.973, |
|
"eval_steps_per_second": 0.697, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 4.144736842105263e-06, |
|
"loss": 0.9886, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 4.12280701754386e-06, |
|
"loss": 0.9687, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 4.100877192982457e-06, |
|
"loss": 0.9731, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 4.078947368421053e-06, |
|
"loss": 0.9503, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 4.05701754385965e-06, |
|
"loss": 0.9563, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 4.035087719298246e-06, |
|
"loss": 0.9525, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 4.013157894736842e-06, |
|
"loss": 0.911, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 3.991228070175439e-06, |
|
"loss": 0.9646, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 3.969298245614036e-06, |
|
"loss": 0.9424, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 3.947368421052632e-06, |
|
"loss": 0.9145, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.5167962567948806, |
|
"eval_accuracy_undropoff": 0.7429246639475331, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.21555466691667607, |
|
"eval_iou_undropoff": 0.7329009745349515, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8634790182113647, |
|
"eval_mean_accuracy": 0.6298604603712068, |
|
"eval_mean_iou": 0.3161518804838759, |
|
"eval_overall_accuracy": 0.7335224151611328, |
|
"eval_runtime": 2.85, |
|
"eval_samples_per_second": 7.018, |
|
"eval_steps_per_second": 0.702, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 3.925438596491229e-06, |
|
"loss": 0.9084, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 3.903508771929825e-06, |
|
"loss": 0.9234, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 3.8815789473684214e-06, |
|
"loss": 0.9259, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 3.859649122807018e-06, |
|
"loss": 0.8798, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 3.837719298245615e-06, |
|
"loss": 0.8789, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 3.815789473684211e-06, |
|
"loss": 0.8941, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 3.7938596491228073e-06, |
|
"loss": 0.8674, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 3.7719298245614037e-06, |
|
"loss": 0.8831, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 3.7500000000000005e-06, |
|
"loss": 0.8512, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 3.728070175438597e-06, |
|
"loss": 0.8745, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_dropoff": 0.5666827220807816, |
|
"eval_accuracy_undropoff": 0.7900781808936921, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2404217803052419, |
|
"eval_iou_undropoff": 0.7790981497740644, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8070214986801147, |
|
"eval_mean_accuracy": 0.6783804514872369, |
|
"eval_mean_iou": 0.3398399766931021, |
|
"eval_overall_accuracy": 0.780789566040039, |
|
"eval_runtime": 2.9903, |
|
"eval_samples_per_second": 6.688, |
|
"eval_steps_per_second": 0.669, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 3.7061403508771933e-06, |
|
"loss": 0.8354, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 3.6842105263157896e-06, |
|
"loss": 0.8436, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 3.662280701754386e-06, |
|
"loss": 0.8022, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 3.640350877192983e-06, |
|
"loss": 0.8872, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 3.618421052631579e-06, |
|
"loss": 0.8419, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 3.596491228070176e-06, |
|
"loss": 0.8014, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 3.5745614035087724e-06, |
|
"loss": 0.8142, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 3.5526315789473687e-06, |
|
"loss": 0.7952, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 3.530701754385965e-06, |
|
"loss": 0.7875, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 3.5087719298245615e-06, |
|
"loss": 0.8088, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.5992797999954127, |
|
"eval_accuracy_undropoff": 0.8389431797941644, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.27303754266211605, |
|
"eval_iou_undropoff": 0.8271554915987344, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.7441512942314148, |
|
"eval_mean_accuracy": 0.7191114898947886, |
|
"eval_mean_iou": 0.36673101142028347, |
|
"eval_overall_accuracy": 0.828978157043457, |
|
"eval_runtime": 2.8979, |
|
"eval_samples_per_second": 6.902, |
|
"eval_steps_per_second": 0.69, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 3.486842105263158e-06, |
|
"loss": 0.7679, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 3.464912280701755e-06, |
|
"loss": 0.8128, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 3.4429824561403515e-06, |
|
"loss": 0.805, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 3.421052631578948e-06, |
|
"loss": 0.7599, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 3.399122807017544e-06, |
|
"loss": 0.7712, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 3.3771929824561406e-06, |
|
"loss": 0.7746, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 3.355263157894737e-06, |
|
"loss": 0.7474, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 3.3333333333333333e-06, |
|
"loss": 0.7928, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 3.3114035087719297e-06, |
|
"loss": 0.7545, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 3.289473684210527e-06, |
|
"loss": 0.7184, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_dropoff": 0.6322713823711553, |
|
"eval_accuracy_undropoff": 0.8702378263383143, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2914895506858354, |
|
"eval_iou_undropoff": 0.8580097549336819, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6956223249435425, |
|
"eval_mean_accuracy": 0.7512546043547348, |
|
"eval_mean_iou": 0.3831664352065058, |
|
"eval_overall_accuracy": 0.8603433609008789, |
|
"eval_runtime": 2.9361, |
|
"eval_samples_per_second": 6.812, |
|
"eval_steps_per_second": 0.681, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 3.2675438596491233e-06, |
|
"loss": 0.7254, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 3.2456140350877197e-06, |
|
"loss": 0.7387, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 3.223684210526316e-06, |
|
"loss": 0.7656, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 3.2017543859649124e-06, |
|
"loss": 0.6769, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 3.179824561403509e-06, |
|
"loss": 0.7103, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 3.157894736842105e-06, |
|
"loss": 0.7061, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 3.135964912280702e-06, |
|
"loss": 0.6942, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 3.1140350877192988e-06, |
|
"loss": 0.6753, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 3.092105263157895e-06, |
|
"loss": 0.6628, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 3.0701754385964915e-06, |
|
"loss": 0.6908, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.6332025963898255, |
|
"eval_accuracy_undropoff": 0.8852680210591884, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3066633490254799, |
|
"eval_iou_undropoff": 0.872782516292678, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6750925779342651, |
|
"eval_mean_accuracy": 0.759235308724507, |
|
"eval_mean_iou": 0.3931486217727193, |
|
"eval_overall_accuracy": 0.8747873306274414, |
|
"eval_runtime": 2.9141, |
|
"eval_samples_per_second": 6.863, |
|
"eval_steps_per_second": 0.686, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 3.048245614035088e-06, |
|
"loss": 0.674, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 3.0263157894736843e-06, |
|
"loss": 0.7057, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 3.004385964912281e-06, |
|
"loss": 0.6581, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 2.9824561403508774e-06, |
|
"loss": 0.6645, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 2.960526315789474e-06, |
|
"loss": 0.711, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 2.9385964912280706e-06, |
|
"loss": 0.6255, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 2.916666666666667e-06, |
|
"loss": 0.6572, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 2.8947368421052634e-06, |
|
"loss": 0.6244, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 2.8728070175438597e-06, |
|
"loss": 0.6323, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 2.8508771929824565e-06, |
|
"loss": 0.643, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_dropoff": 0.6194224638179775, |
|
"eval_accuracy_undropoff": 0.923405212258589, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3308480506497834, |
|
"eval_iou_undropoff": 0.9093924657307382, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6101020574569702, |
|
"eval_mean_accuracy": 0.7714138380382832, |
|
"eval_mean_iou": 0.4134135054601738, |
|
"eval_overall_accuracy": 0.9107658386230468, |
|
"eval_runtime": 2.8349, |
|
"eval_samples_per_second": 7.055, |
|
"eval_steps_per_second": 0.705, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 2.828947368421053e-06, |
|
"loss": 0.6638, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 2.8070175438596493e-06, |
|
"loss": 0.5887, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 2.7850877192982456e-06, |
|
"loss": 0.648, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 2.7631578947368424e-06, |
|
"loss": 0.578, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 2.741228070175439e-06, |
|
"loss": 0.6064, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 2.7192982456140356e-06, |
|
"loss": 0.6784, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 2.697368421052632e-06, |
|
"loss": 0.624, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 2.6754385964912284e-06, |
|
"loss": 0.6188, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 2.6535087719298247e-06, |
|
"loss": 0.607, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.6014, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.6338585747379527, |
|
"eval_accuracy_undropoff": 0.9313126569065759, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3323511938406625, |
|
"eval_iou_undropoff": 0.9174802078875154, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5971364974975586, |
|
"eval_mean_accuracy": 0.7825856158222644, |
|
"eval_mean_iou": 0.41661046724272593, |
|
"eval_overall_accuracy": 0.9189447402954102, |
|
"eval_runtime": 3.1128, |
|
"eval_samples_per_second": 6.425, |
|
"eval_steps_per_second": 0.643, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 2.6096491228070175e-06, |
|
"loss": 0.5448, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 2.5877192982456147e-06, |
|
"loss": 0.712, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 2.565789473684211e-06, |
|
"loss": 0.6238, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 2.5438596491228075e-06, |
|
"loss": 0.5561, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 2.521929824561404e-06, |
|
"loss": 0.5693, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 2.5e-06, |
|
"loss": 0.6064, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 2.4780701754385966e-06, |
|
"loss": 0.5594, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 2.456140350877193e-06, |
|
"loss": 0.574, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 2.4342105263157898e-06, |
|
"loss": 0.5481, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 2.412280701754386e-06, |
|
"loss": 0.5685, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_dropoff": 0.6439000894515929, |
|
"eval_accuracy_undropoff": 0.9453340723220531, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.35985356361855786, |
|
"eval_iou_undropoff": 0.9313976351706408, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5594618916511536, |
|
"eval_mean_accuracy": 0.794617080886823, |
|
"eval_mean_iou": 0.4304170662630662, |
|
"eval_overall_accuracy": 0.9328006744384766, |
|
"eval_runtime": 3.0082, |
|
"eval_samples_per_second": 6.649, |
|
"eval_steps_per_second": 0.665, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 2.3903508771929825e-06, |
|
"loss": 0.5226, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 2.368421052631579e-06, |
|
"loss": 0.6177, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 2.3464912280701757e-06, |
|
"loss": 0.5917, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 2.324561403508772e-06, |
|
"loss": 0.5205, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 2.3026315789473684e-06, |
|
"loss": 0.5653, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 2.280701754385965e-06, |
|
"loss": 0.5249, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 2.2587719298245616e-06, |
|
"loss": 0.4962, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 2.236842105263158e-06, |
|
"loss": 0.5615, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 69.5, |
|
"learning_rate": 2.2149122807017543e-06, |
|
"loss": 0.4999, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 2.192982456140351e-06, |
|
"loss": 0.5172, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.6487855226037295, |
|
"eval_accuracy_undropoff": 0.9532476862654569, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.37272109166231837, |
|
"eval_iou_undropoff": 0.9393002997549955, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.5343549251556396, |
|
"eval_mean_accuracy": 0.8010166044345932, |
|
"eval_mean_iou": 0.43734046380577124, |
|
"eval_overall_accuracy": 0.94058837890625, |
|
"eval_runtime": 2.961, |
|
"eval_samples_per_second": 6.754, |
|
"eval_steps_per_second": 0.675, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.5, |
|
"learning_rate": 2.1710526315789475e-06, |
|
"loss": 0.527, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 2.149122807017544e-06, |
|
"loss": 0.5286, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 71.5, |
|
"learning_rate": 2.1271929824561407e-06, |
|
"loss": 0.5048, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 2.105263157894737e-06, |
|
"loss": 0.5011, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 2.0833333333333334e-06, |
|
"loss": 0.512, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 2.06140350877193e-06, |
|
"loss": 0.5146, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"learning_rate": 2.0394736842105266e-06, |
|
"loss": 0.4928, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 2.017543859649123e-06, |
|
"loss": 0.5346, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 74.5, |
|
"learning_rate": 1.9956140350877194e-06, |
|
"loss": 0.5102, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 1.973684210526316e-06, |
|
"loss": 0.4757, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_accuracy_dropoff": 0.6368265327186403, |
|
"eval_accuracy_undropoff": 0.9625887955644756, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.38222216103853196, |
|
"eval_iou_undropoff": 0.947906194306642, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4962589144706726, |
|
"eval_mean_accuracy": 0.7997076641415579, |
|
"eval_mean_iou": 0.4433761184483913, |
|
"eval_overall_accuracy": 0.9490438461303711, |
|
"eval_runtime": 2.9269, |
|
"eval_samples_per_second": 6.833, |
|
"eval_steps_per_second": 0.683, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.5, |
|
"learning_rate": 1.9517543859649125e-06, |
|
"loss": 0.5084, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 1.929824561403509e-06, |
|
"loss": 0.4696, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 76.5, |
|
"learning_rate": 1.9078947368421057e-06, |
|
"loss": 0.5123, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 1.8859649122807019e-06, |
|
"loss": 0.4738, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 1.8640350877192984e-06, |
|
"loss": 0.5085, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 1.8421052631578948e-06, |
|
"loss": 0.4322, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 78.5, |
|
"learning_rate": 1.8201754385964914e-06, |
|
"loss": 0.4899, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 1.798245614035088e-06, |
|
"loss": 0.4354, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 79.5, |
|
"learning_rate": 1.7763157894736844e-06, |
|
"loss": 0.5503, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 1.7543859649122807e-06, |
|
"loss": 0.4288, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.6168673593431042, |
|
"eval_accuracy_undropoff": 0.970246284243321, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3917943279355294, |
|
"eval_iou_undropoff": 0.9545508298763311, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.45985984802246094, |
|
"eval_mean_accuracy": 0.7935568217932126, |
|
"eval_mean_iou": 0.4487817192706201, |
|
"eval_overall_accuracy": 0.9555530548095703, |
|
"eval_runtime": 3.3564, |
|
"eval_samples_per_second": 5.959, |
|
"eval_steps_per_second": 0.596, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.5, |
|
"learning_rate": 1.7324561403508775e-06, |
|
"loss": 0.4411, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 1.710526315789474e-06, |
|
"loss": 0.4471, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 81.5, |
|
"learning_rate": 1.6885964912280703e-06, |
|
"loss": 0.4561, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 1.6666666666666667e-06, |
|
"loss": 0.4375, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 1.6447368421052635e-06, |
|
"loss": 0.4263, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 1.6228070175438598e-06, |
|
"loss": 0.4892, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 83.5, |
|
"learning_rate": 1.6008771929824562e-06, |
|
"loss": 0.4613, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 1.5789473684210526e-06, |
|
"loss": 0.4389, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 84.5, |
|
"learning_rate": 1.5570175438596494e-06, |
|
"loss": 0.5088, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 1.5350877192982458e-06, |
|
"loss": 0.4124, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_accuracy_dropoff": 0.6296428817174706, |
|
"eval_accuracy_undropoff": 0.96810195656219, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.38764311494947556, |
|
"eval_iou_undropoff": 0.9529083620698436, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.47101444005966187, |
|
"eval_mean_accuracy": 0.7988724191398303, |
|
"eval_mean_iou": 0.4468504923397731, |
|
"eval_overall_accuracy": 0.9540290832519531, |
|
"eval_runtime": 3.1781, |
|
"eval_samples_per_second": 6.293, |
|
"eval_steps_per_second": 0.629, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.5, |
|
"learning_rate": 1.5131578947368421e-06, |
|
"loss": 0.444, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 1.4912280701754387e-06, |
|
"loss": 0.4318, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"learning_rate": 1.4692982456140353e-06, |
|
"loss": 0.441, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 1.4473684210526317e-06, |
|
"loss": 0.4186, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 1.4254385964912283e-06, |
|
"loss": 0.3905, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 1.4035087719298246e-06, |
|
"loss": 0.4534, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 88.5, |
|
"learning_rate": 1.3815789473684212e-06, |
|
"loss": 0.4197, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 1.3596491228070178e-06, |
|
"loss": 0.421, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 1.3377192982456142e-06, |
|
"loss": 0.4123, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 1.3157894736842106e-06, |
|
"loss": 0.4995, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_dropoff": 0.6004220280281658, |
|
"eval_accuracy_undropoff": 0.9762418443407163, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.4015369512531828, |
|
"eval_iou_undropoff": 0.9596764001720002, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.42094817757606506, |
|
"eval_mean_accuracy": 0.788331936184441, |
|
"eval_mean_iou": 0.4537377838083943, |
|
"eval_overall_accuracy": 0.9606155395507813, |
|
"eval_runtime": 3.0265, |
|
"eval_samples_per_second": 6.608, |
|
"eval_steps_per_second": 0.661, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.5, |
|
"learning_rate": 1.2938596491228074e-06, |
|
"loss": 0.3708, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 1.2719298245614037e-06, |
|
"loss": 0.5003, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 91.5, |
|
"learning_rate": 1.25e-06, |
|
"loss": 0.3846, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 1.2280701754385965e-06, |
|
"loss": 0.4393, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 1.206140350877193e-06, |
|
"loss": 0.416, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 1.1842105263157894e-06, |
|
"loss": 0.3925, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"learning_rate": 1.162280701754386e-06, |
|
"loss": 0.4545, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 1.1403508771929824e-06, |
|
"loss": 0.3882, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 94.5, |
|
"learning_rate": 1.118421052631579e-06, |
|
"loss": 0.4603, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 1.0964912280701756e-06, |
|
"loss": 0.3815, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_accuracy_dropoff": 0.6089635083373471, |
|
"eval_accuracy_undropoff": 0.9747594223549395, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.39879296567552464, |
|
"eval_iou_undropoff": 0.9585519879558038, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4287094175815582, |
|
"eval_mean_accuracy": 0.7918614653461433, |
|
"eval_mean_iou": 0.4524483178771095, |
|
"eval_overall_accuracy": 0.9595499038696289, |
|
"eval_runtime": 2.9721, |
|
"eval_samples_per_second": 6.729, |
|
"eval_steps_per_second": 0.673, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.5, |
|
"learning_rate": 1.074561403508772e-06, |
|
"loss": 0.4023, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 1.0526315789473685e-06, |
|
"loss": 0.4105, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 96.5, |
|
"learning_rate": 1.030701754385965e-06, |
|
"loss": 0.4662, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 1.0087719298245615e-06, |
|
"loss": 0.392, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 9.86842105263158e-07, |
|
"loss": 0.3692, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 9.649122807017545e-07, |
|
"loss": 0.4193, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 98.5, |
|
"learning_rate": 9.429824561403509e-07, |
|
"loss": 0.3569, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 9.210526315789474e-07, |
|
"loss": 0.4784, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 99.5, |
|
"learning_rate": 8.99122807017544e-07, |
|
"loss": 0.4157, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 8.771929824561404e-07, |
|
"loss": 0.3764, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_dropoff": 0.6073120943140897, |
|
"eval_accuracy_undropoff": 0.9753421222575243, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.39976024832717344, |
|
"eval_iou_undropoff": 0.9590456081576902, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4244542717933655, |
|
"eval_mean_accuracy": 0.791327108285807, |
|
"eval_mean_iou": 0.4529352854949546, |
|
"eval_overall_accuracy": 0.9600397109985351, |
|
"eval_runtime": 2.942, |
|
"eval_samples_per_second": 6.798, |
|
"eval_steps_per_second": 0.68, |
|
"step": 200 |
|
} |
|
], |
|
"max_steps": 240, |
|
"num_train_epochs": 120, |
|
"total_flos": 1.1575998499258368e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
}