|
{ |
|
"best_metric": 0.1285596638917923, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGBD-b5_7/checkpoint-160", |
|
"epoch": 120.0, |
|
"global_step": 240, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 4.166666666666667e-06, |
|
"loss": 1.1395, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 1.1418, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 1.25e-05, |
|
"loss": 1.1243, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 1.1018, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 2.0833333333333336e-05, |
|
"loss": 1.0687, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 2.5e-05, |
|
"loss": 1.0541, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 2.916666666666667e-05, |
|
"loss": 1.022, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.9945, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"learning_rate": 3.7500000000000003e-05, |
|
"loss": 0.9642, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 4.166666666666667e-05, |
|
"loss": 0.9278, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"eval_accuracy_dropoff": 0.2009128649739673, |
|
"eval_accuracy_undropoff": 0.9081783961224983, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.0807428683093736, |
|
"eval_iou_undropoff": 0.8784609129926589, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.845426082611084, |
|
"eval_mean_accuracy": 0.5545456305482328, |
|
"eval_mean_iou": 0.3197345937673442, |
|
"eval_overall_accuracy": 0.8787708282470703, |
|
"eval_runtime": 2.7556, |
|
"eval_samples_per_second": 7.258, |
|
"eval_steps_per_second": 0.726, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"learning_rate": 4.5833333333333334e-05, |
|
"loss": 0.8786, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 5e-05, |
|
"loss": 0.8476, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"learning_rate": 4.9780701754385966e-05, |
|
"loss": 0.7993, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 4.956140350877193e-05, |
|
"loss": 0.7682, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 4.9342105263157894e-05, |
|
"loss": 0.7194, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 4.912280701754386e-05, |
|
"loss": 0.7016, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 4.890350877192983e-05, |
|
"loss": 0.6592, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 4.868421052631579e-05, |
|
"loss": 0.6162, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"learning_rate": 4.8464912280701755e-05, |
|
"loss": 0.5957, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 4.824561403508772e-05, |
|
"loss": 0.5551, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.013546182251886512, |
|
"eval_accuracy_undropoff": 0.994846449222221, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.0122168669714333, |
|
"eval_iou_undropoff": 0.9540401274092, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4667937755584717, |
|
"eval_mean_accuracy": 0.5041963157370537, |
|
"eval_mean_iou": 0.32208566479354445, |
|
"eval_overall_accuracy": 0.9540447235107422, |
|
"eval_runtime": 2.9472, |
|
"eval_samples_per_second": 6.786, |
|
"eval_steps_per_second": 0.679, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 4.802631578947368e-05, |
|
"loss": 0.5393, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 4.780701754385965e-05, |
|
"loss": 0.5054, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 4.758771929824562e-05, |
|
"loss": 0.4768, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 4.736842105263158e-05, |
|
"loss": 0.4747, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"learning_rate": 4.714912280701755e-05, |
|
"loss": 0.4453, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 4.6929824561403515e-05, |
|
"loss": 0.4263, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"learning_rate": 4.671052631578948e-05, |
|
"loss": 0.4107, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 4.649122807017544e-05, |
|
"loss": 0.3888, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"learning_rate": 4.6271929824561406e-05, |
|
"loss": 0.3656, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 4.605263157894737e-05, |
|
"loss": 0.3667, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"eval_accuracy_dropoff": 0.008839652285602882, |
|
"eval_accuracy_undropoff": 0.9981533905751077, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.0085100181505836, |
|
"eval_iou_undropoff": 0.9570052236822447, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.33538389205932617, |
|
"eval_mean_accuracy": 0.5034965214303553, |
|
"eval_mean_iou": 0.3218384139442761, |
|
"eval_overall_accuracy": 0.9570184707641601, |
|
"eval_runtime": 11.3505, |
|
"eval_samples_per_second": 1.762, |
|
"eval_steps_per_second": 0.176, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"learning_rate": 4.5833333333333334e-05, |
|
"loss": 0.3391, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 4.56140350877193e-05, |
|
"loss": 0.3252, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"learning_rate": 4.539473684210527e-05, |
|
"loss": 0.3199, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 4.517543859649123e-05, |
|
"loss": 0.2957, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"learning_rate": 4.4956140350877196e-05, |
|
"loss": 0.2837, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 4.473684210526316e-05, |
|
"loss": 0.2764, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"learning_rate": 4.451754385964912e-05, |
|
"loss": 0.239, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 4.429824561403509e-05, |
|
"loss": 0.2586, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"learning_rate": 4.407894736842105e-05, |
|
"loss": 0.2539, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 4.3859649122807014e-05, |
|
"loss": 0.2402, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.31156219179338973, |
|
"eval_accuracy_undropoff": 0.9867716375598645, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.23875964087096524, |
|
"eval_iou_undropoff": 0.9581548537274001, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.26776206493377686, |
|
"eval_mean_accuracy": 0.6491669146766271, |
|
"eval_mean_iou": 0.5984572472991827, |
|
"eval_overall_accuracy": 0.9586969375610351, |
|
"eval_runtime": 3.4868, |
|
"eval_samples_per_second": 5.736, |
|
"eval_steps_per_second": 0.574, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"learning_rate": 4.3640350877192985e-05, |
|
"loss": 0.2083, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 4.342105263157895e-05, |
|
"loss": 0.1978, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"learning_rate": 4.320175438596491e-05, |
|
"loss": 0.2042, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 4.298245614035088e-05, |
|
"loss": 0.1683, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"learning_rate": 4.2763157894736847e-05, |
|
"loss": 0.1905, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 4.254385964912281e-05, |
|
"loss": 0.1825, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"learning_rate": 4.2324561403508774e-05, |
|
"loss": 0.1595, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 4.210526315789474e-05, |
|
"loss": 0.1808, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"learning_rate": 4.188596491228071e-05, |
|
"loss": 0.1612, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 4.166666666666667e-05, |
|
"loss": 0.1562, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"eval_accuracy_dropoff": 0.35438886212986537, |
|
"eval_accuracy_undropoff": 0.9895070235438224, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2853675924660444, |
|
"eval_iou_undropoff": 0.9625474168179068, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.2101479470729828, |
|
"eval_mean_accuracy": 0.6719479428368439, |
|
"eval_mean_iou": 0.6239575046419756, |
|
"eval_overall_accuracy": 0.9630992889404297, |
|
"eval_runtime": 2.857, |
|
"eval_samples_per_second": 7.0, |
|
"eval_steps_per_second": 0.7, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 25.5, |
|
"learning_rate": 4.1447368421052636e-05, |
|
"loss": 0.1405, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 4.12280701754386e-05, |
|
"loss": 0.1564, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 26.5, |
|
"learning_rate": 4.100877192982456e-05, |
|
"loss": 0.1386, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 4.078947368421053e-05, |
|
"loss": 0.1369, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 27.5, |
|
"learning_rate": 4.057017543859649e-05, |
|
"loss": 0.1337, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 4.0350877192982455e-05, |
|
"loss": 0.1216, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 28.5, |
|
"learning_rate": 4.0131578947368425e-05, |
|
"loss": 0.1219, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 3.991228070175439e-05, |
|
"loss": 0.1291, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 29.5, |
|
"learning_rate": 3.969298245614035e-05, |
|
"loss": 0.1128, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 3.9473684210526316e-05, |
|
"loss": 0.1159, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.3353287919447694, |
|
"eval_accuracy_undropoff": 0.9927785412004454, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.28747610920159505, |
|
"eval_iou_undropoff": 0.9649536343906318, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.17043346166610718, |
|
"eval_mean_accuracy": 0.6640536665726073, |
|
"eval_mean_iou": 0.6262148717961133, |
|
"eval_overall_accuracy": 0.9654422760009765, |
|
"eval_runtime": 2.9147, |
|
"eval_samples_per_second": 6.862, |
|
"eval_steps_per_second": 0.686, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 30.5, |
|
"learning_rate": 3.925438596491228e-05, |
|
"loss": 0.1099, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 3.9035087719298244e-05, |
|
"loss": 0.1059, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 31.5, |
|
"learning_rate": 3.8815789473684214e-05, |
|
"loss": 0.1125, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 3.859649122807018e-05, |
|
"loss": 0.1054, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 32.5, |
|
"learning_rate": 3.837719298245614e-05, |
|
"loss": 0.0973, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 3.815789473684211e-05, |
|
"loss": 0.1075, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 33.5, |
|
"learning_rate": 3.7938596491228076e-05, |
|
"loss": 0.0956, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 3.771929824561404e-05, |
|
"loss": 0.0914, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 34.5, |
|
"learning_rate": 3.7500000000000003e-05, |
|
"loss": 0.0927, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 3.728070175438597e-05, |
|
"loss": 0.0869, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"eval_accuracy_dropoff": 0.3719947705222597, |
|
"eval_accuracy_undropoff": 0.9914632872195085, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3108309600561154, |
|
"eval_iou_undropoff": 0.9651674894760284, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.144324392080307, |
|
"eval_mean_accuracy": 0.6817290288708842, |
|
"eval_mean_iou": 0.6379992247660718, |
|
"eval_overall_accuracy": 0.9657062530517578, |
|
"eval_runtime": 2.8546, |
|
"eval_samples_per_second": 7.006, |
|
"eval_steps_per_second": 0.701, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 35.5, |
|
"learning_rate": 3.706140350877193e-05, |
|
"loss": 0.0825, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 3.6842105263157895e-05, |
|
"loss": 0.0889, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 36.5, |
|
"learning_rate": 3.662280701754386e-05, |
|
"loss": 0.09, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 3.640350877192983e-05, |
|
"loss": 0.0929, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 37.5, |
|
"learning_rate": 3.618421052631579e-05, |
|
"loss": 0.0851, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 3.5964912280701756e-05, |
|
"loss": 0.0811, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 38.5, |
|
"learning_rate": 3.574561403508772e-05, |
|
"loss": 0.0817, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 3.5526315789473684e-05, |
|
"loss": 0.0867, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 3.530701754385965e-05, |
|
"loss": 0.0818, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 3.508771929824561e-05, |
|
"loss": 0.079, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.2766485469850226, |
|
"eval_accuracy_undropoff": 0.995262379139025, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.24941170632048668, |
|
"eval_iou_undropoff": 0.9649801295572932, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.13497576117515564, |
|
"eval_mean_accuracy": 0.6359554630620239, |
|
"eval_mean_iou": 0.6071959179388899, |
|
"eval_overall_accuracy": 0.9653829574584961, |
|
"eval_runtime": 2.8312, |
|
"eval_samples_per_second": 7.064, |
|
"eval_steps_per_second": 0.706, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 40.5, |
|
"learning_rate": 3.4868421052631575e-05, |
|
"loss": 0.073, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 3.4649122807017546e-05, |
|
"loss": 0.0825, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 41.5, |
|
"learning_rate": 3.442982456140351e-05, |
|
"loss": 0.0688, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 3.421052631578947e-05, |
|
"loss": 0.0813, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 42.5, |
|
"learning_rate": 3.3991228070175444e-05, |
|
"loss": 0.0745, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 3.377192982456141e-05, |
|
"loss": 0.0759, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 43.5, |
|
"learning_rate": 3.355263157894737e-05, |
|
"loss": 0.0706, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.0767, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 44.5, |
|
"learning_rate": 3.31140350877193e-05, |
|
"loss": 0.0736, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 3.289473684210527e-05, |
|
"loss": 0.0647, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"eval_accuracy_dropoff": 0.20900479368792863, |
|
"eval_accuracy_undropoff": 0.9971020232303824, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.195917559996044, |
|
"eval_iou_undropoff": 0.9640208661467319, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.13696631789207458, |
|
"eval_mean_accuracy": 0.6030534084591556, |
|
"eval_mean_iou": 0.5799692130713879, |
|
"eval_overall_accuracy": 0.9643335342407227, |
|
"eval_runtime": 2.9184, |
|
"eval_samples_per_second": 6.853, |
|
"eval_steps_per_second": 0.685, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 45.5, |
|
"learning_rate": 3.267543859649123e-05, |
|
"loss": 0.0677, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 3.24561403508772e-05, |
|
"loss": 0.0656, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 46.5, |
|
"learning_rate": 3.223684210526316e-05, |
|
"loss": 0.0811, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 3.2017543859649124e-05, |
|
"loss": 0.062, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 47.5, |
|
"learning_rate": 3.179824561403509e-05, |
|
"loss": 0.0664, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 3.157894736842105e-05, |
|
"loss": 0.0622, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 48.5, |
|
"learning_rate": 3.1359649122807015e-05, |
|
"loss": 0.0658, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 3.1140350877192986e-05, |
|
"loss": 0.0667, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 49.5, |
|
"learning_rate": 3.092105263157895e-05, |
|
"loss": 0.0632, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 3.0701754385964913e-05, |
|
"loss": 0.0587, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.37065987752012663, |
|
"eval_accuracy_undropoff": 0.9884823234760597, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.29289882915866167, |
|
"eval_iou_undropoff": 0.9622113420215658, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.1336326152086258, |
|
"eval_mean_accuracy": 0.6795711004980931, |
|
"eval_mean_iou": 0.6275550855901137, |
|
"eval_overall_accuracy": 0.9627937316894531, |
|
"eval_runtime": 2.7754, |
|
"eval_samples_per_second": 7.206, |
|
"eval_steps_per_second": 0.721, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 50.5, |
|
"learning_rate": 3.048245614035088e-05, |
|
"loss": 0.0589, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 3.0263157894736844e-05, |
|
"loss": 0.0615, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 51.5, |
|
"learning_rate": 3.0043859649122808e-05, |
|
"loss": 0.0564, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 2.9824561403508772e-05, |
|
"loss": 0.0621, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 52.5, |
|
"learning_rate": 2.9605263157894735e-05, |
|
"loss": 0.0586, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 2.9385964912280706e-05, |
|
"loss": 0.0559, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 53.5, |
|
"learning_rate": 2.916666666666667e-05, |
|
"loss": 0.0545, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 2.8947368421052634e-05, |
|
"loss": 0.0576, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 54.5, |
|
"learning_rate": 2.8728070175438597e-05, |
|
"loss": 0.0547, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 2.850877192982456e-05, |
|
"loss": 0.0575, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"eval_accuracy_dropoff": 0.3125713892520471, |
|
"eval_accuracy_undropoff": 0.993698363246124, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2729271809661139, |
|
"eval_iou_undropoff": 0.9649217226523994, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.13126148283481598, |
|
"eval_mean_accuracy": 0.6531348762490856, |
|
"eval_mean_iou": 0.6189244518092567, |
|
"eval_overall_accuracy": 0.9653776168823243, |
|
"eval_runtime": 6.3082, |
|
"eval_samples_per_second": 3.17, |
|
"eval_steps_per_second": 0.317, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 55.5, |
|
"learning_rate": 2.8289473684210528e-05, |
|
"loss": 0.0548, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 2.8070175438596492e-05, |
|
"loss": 0.0551, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 56.5, |
|
"learning_rate": 2.7850877192982456e-05, |
|
"loss": 0.0593, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 2.7631578947368426e-05, |
|
"loss": 0.0489, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 2.741228070175439e-05, |
|
"loss": 0.0536, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 2.7192982456140354e-05, |
|
"loss": 0.0562, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 58.5, |
|
"learning_rate": 2.6973684210526317e-05, |
|
"loss": 0.0533, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 2.675438596491228e-05, |
|
"loss": 0.0494, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 59.5, |
|
"learning_rate": 2.6535087719298245e-05, |
|
"loss": 0.0518, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 2.6315789473684212e-05, |
|
"loss": 0.0527, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.3390628225418014, |
|
"eval_accuracy_undropoff": 0.9919562338242567, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.28602939469223804, |
|
"eval_iou_undropoff": 0.9643062074697338, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.12983748316764832, |
|
"eval_mean_accuracy": 0.665509528183029, |
|
"eval_mean_iou": 0.6251678010809859, |
|
"eval_overall_accuracy": 0.9648094177246094, |
|
"eval_runtime": 13.8883, |
|
"eval_samples_per_second": 1.44, |
|
"eval_steps_per_second": 0.144, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 2.6096491228070176e-05, |
|
"loss": 0.0487, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 2.5877192982456143e-05, |
|
"loss": 0.0562, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 61.5, |
|
"learning_rate": 2.565789473684211e-05, |
|
"loss": 0.0496, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 2.5438596491228074e-05, |
|
"loss": 0.0513, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 62.5, |
|
"learning_rate": 2.5219298245614037e-05, |
|
"loss": 0.0518, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.0512, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 63.5, |
|
"learning_rate": 2.4780701754385965e-05, |
|
"loss": 0.0454, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 2.456140350877193e-05, |
|
"loss": 0.0523, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 64.5, |
|
"learning_rate": 2.4342105263157896e-05, |
|
"loss": 0.0483, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 2.412280701754386e-05, |
|
"loss": 0.0491, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"eval_accuracy_dropoff": 0.3063281267918989, |
|
"eval_accuracy_undropoff": 0.9920477782078595, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2588756134814734, |
|
"eval_iou_undropoff": 0.963065642833159, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.13133761286735535, |
|
"eval_mean_accuracy": 0.6491879524998792, |
|
"eval_mean_iou": 0.6109706281573162, |
|
"eval_overall_accuracy": 0.9635360717773438, |
|
"eval_runtime": 2.8172, |
|
"eval_samples_per_second": 7.099, |
|
"eval_steps_per_second": 0.71, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 65.5, |
|
"learning_rate": 2.3903508771929827e-05, |
|
"loss": 0.0477, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 2.368421052631579e-05, |
|
"loss": 0.0476, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 66.5, |
|
"learning_rate": 2.3464912280701758e-05, |
|
"loss": 0.0524, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 2.324561403508772e-05, |
|
"loss": 0.0453, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 67.5, |
|
"learning_rate": 2.3026315789473685e-05, |
|
"loss": 0.0456, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 2.280701754385965e-05, |
|
"loss": 0.0508, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 2.2587719298245616e-05, |
|
"loss": 0.0434, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 2.236842105263158e-05, |
|
"loss": 0.0484, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 69.5, |
|
"learning_rate": 2.2149122807017543e-05, |
|
"loss": 0.0459, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 2.1929824561403507e-05, |
|
"loss": 0.0441, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.2919333012225051, |
|
"eval_accuracy_undropoff": 0.9939467271390291, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2561873017406566, |
|
"eval_iou_undropoff": 0.9643244954858242, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.12954853475093842, |
|
"eval_mean_accuracy": 0.6429400141807671, |
|
"eval_mean_iou": 0.6102558986132405, |
|
"eval_overall_accuracy": 0.9647575378417969, |
|
"eval_runtime": 2.8094, |
|
"eval_samples_per_second": 7.119, |
|
"eval_steps_per_second": 0.712, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 70.5, |
|
"learning_rate": 2.1710526315789474e-05, |
|
"loss": 0.0445, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 2.149122807017544e-05, |
|
"loss": 0.0447, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 71.5, |
|
"learning_rate": 2.1271929824561405e-05, |
|
"loss": 0.0415, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 2.105263157894737e-05, |
|
"loss": 0.0451, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 72.5, |
|
"learning_rate": 2.0833333333333336e-05, |
|
"loss": 0.0433, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 2.06140350877193e-05, |
|
"loss": 0.0411, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 73.5, |
|
"learning_rate": 2.0394736842105264e-05, |
|
"loss": 0.0418, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 2.0175438596491227e-05, |
|
"loss": 0.0477, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 74.5, |
|
"learning_rate": 1.9956140350877194e-05, |
|
"loss": 0.0404, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 1.9736842105263158e-05, |
|
"loss": 0.0426, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"eval_accuracy_dropoff": 0.3333470951168605, |
|
"eval_accuracy_undropoff": 0.9932967620154491, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2887340172760432, |
|
"eval_iou_undropoff": 0.9653766615372832, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.12331400066614151, |
|
"eval_mean_accuracy": 0.6633219285661548, |
|
"eval_mean_iou": 0.6270553394066631, |
|
"eval_overall_accuracy": 0.9658565521240234, |
|
"eval_runtime": 6.1041, |
|
"eval_samples_per_second": 3.276, |
|
"eval_steps_per_second": 0.328, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 75.5, |
|
"learning_rate": 1.9517543859649122e-05, |
|
"loss": 0.0425, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 1.929824561403509e-05, |
|
"loss": 0.0431, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 76.5, |
|
"learning_rate": 1.9078947368421056e-05, |
|
"loss": 0.0389, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 1.885964912280702e-05, |
|
"loss": 0.0433, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 77.5, |
|
"learning_rate": 1.8640350877192984e-05, |
|
"loss": 0.043, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 1.8421052631578947e-05, |
|
"loss": 0.0383, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 78.5, |
|
"learning_rate": 1.8201754385964914e-05, |
|
"loss": 0.0412, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 1.7982456140350878e-05, |
|
"loss": 0.0412, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 79.5, |
|
"learning_rate": 1.7763157894736842e-05, |
|
"loss": 0.0402, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 1.7543859649122806e-05, |
|
"loss": 0.0477, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.33281956008165325, |
|
"eval_accuracy_undropoff": 0.9929140666900834, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.28609114317372564, |
|
"eval_iou_undropoff": 0.9649832592793454, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.1285596638917923, |
|
"eval_mean_accuracy": 0.6628668133858684, |
|
"eval_mean_iou": 0.6255372012265354, |
|
"eval_overall_accuracy": 0.9654678344726563, |
|
"eval_runtime": 8.3549, |
|
"eval_samples_per_second": 2.394, |
|
"eval_steps_per_second": 0.239, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 80.5, |
|
"learning_rate": 1.7324561403508773e-05, |
|
"loss": 0.0409, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 1.7105263157894737e-05, |
|
"loss": 0.0416, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 81.5, |
|
"learning_rate": 1.6885964912280704e-05, |
|
"loss": 0.0381, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.0392, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 82.5, |
|
"learning_rate": 1.6447368421052635e-05, |
|
"loss": 0.0398, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 1.62280701754386e-05, |
|
"loss": 0.0416, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 83.5, |
|
"learning_rate": 1.6008771929824562e-05, |
|
"loss": 0.0389, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 1.5789473684210526e-05, |
|
"loss": 0.0398, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 84.5, |
|
"learning_rate": 1.5570175438596493e-05, |
|
"loss": 0.0402, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 1.5350877192982457e-05, |
|
"loss": 0.039, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"eval_accuracy_dropoff": 0.37348104314319136, |
|
"eval_accuracy_undropoff": 0.9912656707566442, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.31088955415374747, |
|
"eval_iou_undropoff": 0.9650356885247457, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.12654618918895721, |
|
"eval_mean_accuracy": 0.6823733569499177, |
|
"eval_mean_iou": 0.6379626213392466, |
|
"eval_overall_accuracy": 0.9655786514282226, |
|
"eval_runtime": 2.7731, |
|
"eval_samples_per_second": 7.212, |
|
"eval_steps_per_second": 0.721, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 85.5, |
|
"learning_rate": 1.5131578947368422e-05, |
|
"loss": 0.0394, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 1.4912280701754386e-05, |
|
"loss": 0.0377, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 86.5, |
|
"learning_rate": 1.4692982456140353e-05, |
|
"loss": 0.0393, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 1.4473684210526317e-05, |
|
"loss": 0.0379, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 87.5, |
|
"learning_rate": 1.425438596491228e-05, |
|
"loss": 0.0387, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 1.4035087719298246e-05, |
|
"loss": 0.0388, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 88.5, |
|
"learning_rate": 1.3815789473684213e-05, |
|
"loss": 0.0381, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 1.3596491228070177e-05, |
|
"loss": 0.0362, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 1.337719298245614e-05, |
|
"loss": 0.0378, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 1.3157894736842106e-05, |
|
"loss": 0.0378, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_dropoff": 0.3154154911809904, |
|
"eval_accuracy_undropoff": 0.9931711870022896, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2725190143831572, |
|
"eval_iou_undropoff": 0.9645253754212552, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.13092683255672455, |
|
"eval_mean_accuracy": 0.65429333909164, |
|
"eval_mean_iou": 0.6185221949022062, |
|
"eval_overall_accuracy": 0.9649906158447266, |
|
"eval_runtime": 2.7934, |
|
"eval_samples_per_second": 7.16, |
|
"eval_steps_per_second": 0.716, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 90.5, |
|
"learning_rate": 1.2938596491228071e-05, |
|
"loss": 0.0369, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 1.2719298245614037e-05, |
|
"loss": 0.0387, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 91.5, |
|
"learning_rate": 1.25e-05, |
|
"loss": 0.0371, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 1.2280701754385964e-05, |
|
"loss": 0.0356, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 92.5, |
|
"learning_rate": 1.206140350877193e-05, |
|
"loss": 0.0377, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 1.1842105263157895e-05, |
|
"loss": 0.0337, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 93.5, |
|
"learning_rate": 1.162280701754386e-05, |
|
"loss": 0.038, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 1.1403508771929824e-05, |
|
"loss": 0.0346, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 94.5, |
|
"learning_rate": 1.118421052631579e-05, |
|
"loss": 0.036, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 1.0964912280701754e-05, |
|
"loss": 0.0362, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"eval_accuracy_dropoff": 0.3507511640175233, |
|
"eval_accuracy_undropoff": 0.992195642288331, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.297273444759361, |
|
"eval_iou_undropoff": 0.9650146387690659, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.12657637894153595, |
|
"eval_mean_accuracy": 0.6714734031529271, |
|
"eval_mean_iou": 0.6311440417642135, |
|
"eval_overall_accuracy": 0.9655248641967773, |
|
"eval_runtime": 4.6147, |
|
"eval_samples_per_second": 4.334, |
|
"eval_steps_per_second": 0.433, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 95.5, |
|
"learning_rate": 1.074561403508772e-05, |
|
"loss": 0.0373, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 1.0526315789473684e-05, |
|
"loss": 0.0358, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 96.5, |
|
"learning_rate": 1.030701754385965e-05, |
|
"loss": 0.0372, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 1.0087719298245614e-05, |
|
"loss": 0.0371, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 97.5, |
|
"learning_rate": 9.868421052631579e-06, |
|
"loss": 0.0363, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 9.649122807017545e-06, |
|
"loss": 0.0376, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 98.5, |
|
"learning_rate": 9.42982456140351e-06, |
|
"loss": 0.0371, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 9.210526315789474e-06, |
|
"loss": 0.0427, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 99.5, |
|
"learning_rate": 8.991228070175439e-06, |
|
"loss": 0.0334, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 8.771929824561403e-06, |
|
"loss": 0.0394, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_dropoff": 0.33373701231679626, |
|
"eval_accuracy_undropoff": 0.993359051998205, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.28943170290215425, |
|
"eval_iou_undropoff": 0.9654530730096186, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.1306542307138443, |
|
"eval_mean_accuracy": 0.6635480321575006, |
|
"eval_mean_iou": 0.6274423879558864, |
|
"eval_overall_accuracy": 0.9659324645996094, |
|
"eval_runtime": 11.5796, |
|
"eval_samples_per_second": 1.727, |
|
"eval_steps_per_second": 0.173, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 100.5, |
|
"learning_rate": 8.552631578947368e-06, |
|
"loss": 0.0349, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 101.0, |
|
"learning_rate": 8.333333333333334e-06, |
|
"loss": 0.037, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 101.5, |
|
"learning_rate": 8.1140350877193e-06, |
|
"loss": 0.0345, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 102.0, |
|
"learning_rate": 7.894736842105263e-06, |
|
"loss": 0.0371, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 102.5, |
|
"learning_rate": 7.675438596491228e-06, |
|
"loss": 0.0355, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 103.0, |
|
"learning_rate": 7.456140350877193e-06, |
|
"loss": 0.0346, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 103.5, |
|
"learning_rate": 7.236842105263158e-06, |
|
"loss": 0.0363, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 104.0, |
|
"learning_rate": 7.017543859649123e-06, |
|
"loss": 0.0348, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 104.5, |
|
"learning_rate": 6.798245614035088e-06, |
|
"loss": 0.0351, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"learning_rate": 6.578947368421053e-06, |
|
"loss": 0.0362, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"eval_accuracy_dropoff": 0.3660680290832359, |
|
"eval_accuracy_undropoff": 0.9918169271535567, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3079764119267967, |
|
"eval_iou_undropoff": 0.965270142099317, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.12709322571754456, |
|
"eval_mean_accuracy": 0.6789424781183964, |
|
"eval_mean_iou": 0.6366232770130569, |
|
"eval_overall_accuracy": 0.9657987594604492, |
|
"eval_runtime": 2.7866, |
|
"eval_samples_per_second": 7.177, |
|
"eval_steps_per_second": 0.718, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 105.5, |
|
"learning_rate": 6.3596491228070184e-06, |
|
"loss": 0.0347, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 106.0, |
|
"learning_rate": 6.140350877192982e-06, |
|
"loss": 0.036, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 106.5, |
|
"learning_rate": 5.921052631578948e-06, |
|
"loss": 0.0355, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 107.0, |
|
"learning_rate": 5.701754385964912e-06, |
|
"loss": 0.0345, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 107.5, |
|
"learning_rate": 5.482456140350877e-06, |
|
"loss": 0.0334, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 108.0, |
|
"learning_rate": 5.263157894736842e-06, |
|
"loss": 0.0391, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 108.5, |
|
"learning_rate": 5.043859649122807e-06, |
|
"loss": 0.0352, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 109.0, |
|
"learning_rate": 4.824561403508772e-06, |
|
"loss": 0.0343, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 109.5, |
|
"learning_rate": 4.605263157894737e-06, |
|
"loss": 0.0331, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"learning_rate": 4.3859649122807014e-06, |
|
"loss": 0.0361, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 110.0, |
|
"eval_accuracy_dropoff": 0.35535677423794126, |
|
"eval_accuracy_undropoff": 0.9917641896282203, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.29865949055243485, |
|
"eval_iou_undropoff": 0.9647824937168638, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.12739057838916779, |
|
"eval_mean_accuracy": 0.6735604819330808, |
|
"eval_mean_iou": 0.6317209921346493, |
|
"eval_overall_accuracy": 0.965302848815918, |
|
"eval_runtime": 2.8789, |
|
"eval_samples_per_second": 6.947, |
|
"eval_steps_per_second": 0.695, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 110.5, |
|
"learning_rate": 4.166666666666667e-06, |
|
"loss": 0.0354, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 111.0, |
|
"learning_rate": 3.9473684210526315e-06, |
|
"loss": 0.0334, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 111.5, |
|
"learning_rate": 3.7280701754385965e-06, |
|
"loss": 0.0333, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 112.0, |
|
"learning_rate": 3.5087719298245615e-06, |
|
"loss": 0.0401, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 112.5, |
|
"learning_rate": 3.2894736842105265e-06, |
|
"loss": 0.0313, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 113.0, |
|
"learning_rate": 3.070175438596491e-06, |
|
"loss": 0.0416, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 113.5, |
|
"learning_rate": 2.850877192982456e-06, |
|
"loss": 0.0349, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 114.0, |
|
"learning_rate": 2.631578947368421e-06, |
|
"loss": 0.0357, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 114.5, |
|
"learning_rate": 2.412280701754386e-06, |
|
"loss": 0.0329, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"learning_rate": 2.1929824561403507e-06, |
|
"loss": 0.0353, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 115.0, |
|
"eval_accuracy_dropoff": 0.32280556893506734, |
|
"eval_accuracy_undropoff": 0.9930850158759852, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2784262212058146, |
|
"eval_iou_undropoff": 0.9647420691308475, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.12896691262722015, |
|
"eval_mean_accuracy": 0.6579452924055262, |
|
"eval_mean_iou": 0.621584145168331, |
|
"eval_overall_accuracy": 0.9652153015136719, |
|
"eval_runtime": 5.6606, |
|
"eval_samples_per_second": 3.533, |
|
"eval_steps_per_second": 0.353, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 115.5, |
|
"learning_rate": 1.9736842105263157e-06, |
|
"loss": 0.0334, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 116.0, |
|
"learning_rate": 1.7543859649122807e-06, |
|
"loss": 0.0372, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 116.5, |
|
"learning_rate": 1.5350877192982455e-06, |
|
"loss": 0.0356, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 117.0, |
|
"learning_rate": 1.3157894736842106e-06, |
|
"loss": 0.033, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 117.5, |
|
"learning_rate": 1.0964912280701754e-06, |
|
"loss": 0.0339, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 118.0, |
|
"learning_rate": 8.771929824561404e-07, |
|
"loss": 0.0341, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 118.5, |
|
"learning_rate": 6.578947368421053e-07, |
|
"loss": 0.0386, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 119.0, |
|
"learning_rate": 4.385964912280702e-07, |
|
"loss": 0.034, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 119.5, |
|
"learning_rate": 2.192982456140351e-07, |
|
"loss": 0.0335, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"learning_rate": 0.0, |
|
"loss": 0.0344, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 120.0, |
|
"eval_accuracy_dropoff": 0.33194339319709165, |
|
"eval_accuracy_undropoff": 0.9926412246250411, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.28380371178011704, |
|
"eval_iou_undropoff": 0.9646824554135581, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.12955032289028168, |
|
"eval_mean_accuracy": 0.6622923089110664, |
|
"eval_mean_iou": 0.6242430835968376, |
|
"eval_overall_accuracy": 0.9651699066162109, |
|
"eval_runtime": 11.3006, |
|
"eval_samples_per_second": 1.77, |
|
"eval_steps_per_second": 0.177, |
|
"step": 240 |
|
} |
|
], |
|
"max_steps": 240, |
|
"num_train_epochs": 120, |
|
"total_flos": 2.778548611716219e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
}