{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5533409714698792,
"min": 0.5533409714698792,
"max": 1.4533696174621582,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16352.3330078125,
"min": 16352.3330078125,
"max": 44089.421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989919.0,
"min": 29952.0,
"max": 989919.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989919.0,
"min": 29952.0,
"max": 989919.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.45215925574302673,
"min": -0.12473366409540176,
"max": 0.4700194299221039,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 122.98731994628906,
"min": -30.060813903808594,
"max": 128.3153076171875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.1458725929260254,
"min": -0.010487104766070843,
"max": 0.23142144083976746,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 39.677345275878906,
"min": -2.747621536254883,
"max": 55.77256774902344,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07114407039419957,
"min": 0.06450997000815821,
"max": 0.07523651785760661,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9960169855187939,
"min": 0.5067951839772487,
"max": 1.0558866170222425,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016979277492695274,
"min": 0.00020537784560465923,
"max": 0.016979277492695274,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23770988489773384,
"min": 0.00266991199286057,
"max": 0.23770988489773384,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.545890341878571e-06,
"min": 7.545890341878571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001056424647863,
"min": 0.0001056424647863,
"max": 0.003372379375873599,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025152642857143,
"min": 0.1025152642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352137,
"min": 1.3886848,
"max": 2.4858562,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026127490214285714,
"min": 0.00026127490214285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036578486299999997,
"min": 0.0036578486299999997,
"max": 0.11243022736,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006968182511627674,
"min": 0.006931137293577194,
"max": 0.37385794520378113,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09755455702543259,
"min": 0.09703592211008072,
"max": 2.6170055866241455,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 398.7894736842105,
"min": 370.0133333333333,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30308.0,
"min": 15984.0,
"max": 33494.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4432789253953255,
"min": -1.0000000521540642,
"max": 1.499184191619095,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 109.68919833004475,
"min": -30.39380171895027,
"max": 113.93799856305122,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4432789253953255,
"min": -1.0000000521540642,
"max": 1.499184191619095,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 109.68919833004475,
"min": -30.39380171895027,
"max": 113.93799856305122,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029132493057037016,
"min": 0.026908253610599786,
"max": 7.782940157689154,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.2140694723348133,
"min": 2.0450272744055837,
"max": 124.52704252302647,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697479598",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697481743"
},
"total": 2144.3861871199997,
"count": 1,
"self": 0.4770425349997822,
"children": {
"run_training.setup": {
"total": 0.0445226570000159,
"count": 1,
"self": 0.0445226570000159
},
"TrainerController.start_learning": {
"total": 2143.864621928,
"count": 1,
"self": 1.4100475470163474,
"children": {
"TrainerController._reset_env": {
"total": 3.440619722000065,
"count": 1,
"self": 3.440619722000065
},
"TrainerController.advance": {
"total": 2138.9318324669835,
"count": 63571,
"self": 1.4410998470098093,
"children": {
"env_step": {
"total": 1509.7093147180287,
"count": 63571,
"self": 1378.100113011965,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.72845820106204,
"count": 63571,
"self": 4.7543529771237445,
"children": {
"TorchPolicy.evaluate": {
"total": 125.9741052239383,
"count": 62561,
"self": 125.9741052239383
}
}
},
"workers": {
"total": 0.8807435050016466,
"count": 63571,
"self": 0.0,
"children": {
"worker_root": {
"total": 2139.145786640969,
"count": 63571,
"is_parallel": true,
"self": 884.2740653309529,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019167709999692306,
"count": 1,
"is_parallel": true,
"self": 0.000653697999950964,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012630730000182666,
"count": 8,
"is_parallel": true,
"self": 0.0012630730000182666
}
}
},
"UnityEnvironment.step": {
"total": 0.09257338399993387,
"count": 1,
"is_parallel": true,
"self": 0.0006695749998471001,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005182120000881696,
"count": 1,
"is_parallel": true,
"self": 0.0005182120000881696
},
"communicator.exchange": {
"total": 0.089545003000012,
"count": 1,
"is_parallel": true,
"self": 0.089545003000012
},
"steps_from_proto": {
"total": 0.001840593999986595,
"count": 1,
"is_parallel": true,
"self": 0.00041147899992211023,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014291150000644848,
"count": 8,
"is_parallel": true,
"self": 0.0014291150000644848
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1254.8717213100163,
"count": 63570,
"is_parallel": true,
"self": 35.52129178104178,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.890411570005654,
"count": 63570,
"is_parallel": true,
"self": 25.890411570005654
},
"communicator.exchange": {
"total": 1089.1121541960138,
"count": 63570,
"is_parallel": true,
"self": 1089.1121541960138
},
"steps_from_proto": {
"total": 104.34786376295506,
"count": 63570,
"is_parallel": true,
"self": 21.22024869284087,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.12761507011419,
"count": 508560,
"is_parallel": true,
"self": 83.12761507011419
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 627.7814179019449,
"count": 63571,
"self": 2.665183775985497,
"children": {
"process_trajectory": {
"total": 122.91377174596323,
"count": 63571,
"self": 122.72042595696325,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19334578899997723,
"count": 2,
"self": 0.19334578899997723
}
}
},
"_update_policy": {
"total": 502.2024623799962,
"count": 450,
"self": 296.2776769729769,
"children": {
"TorchPPOOptimizer.update": {
"total": 205.92478540701927,
"count": 22812,
"self": 205.92478540701927
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.380000847158954e-07,
"count": 1,
"self": 9.380000847158954e-07
},
"TrainerController._save_models": {
"total": 0.08212125399995784,
"count": 1,
"self": 0.0013970499999231833,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08072420400003466,
"count": 1,
"self": 0.08072420400003466
}
}
}
}
}
}
}