{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.39933642745018005, "min": 0.3950047194957733, "max": 1.5384083986282349, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 11986.482421875, "min": 11831.181640625, "max": 46669.15625, "count": 33 }, "Pyramids.Step.mean": { "value": 989967.0, "min": 29952.0, "max": 989967.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989967.0, "min": 29952.0, "max": 989967.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.48026666045188904, "min": -0.10411398857831955, "max": 0.49897605180740356, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 126.79039764404297, "min": -24.98735809326172, "max": 137.21841430664062, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.001223506173118949, "min": 0.0007038189214654267, "max": 0.23235321044921875, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 0.3230056166648865, "min": 0.19003111124038696, "max": 55.494178771972656, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.0664521598706155, "min": 0.0664521598706155, "max": 0.07485395115163616, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9303302381886168, "min": 0.4892200856083528, "max": 1.0790150402337493, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.01297132051279602, "min": 0.0005168138330218805, "max": 0.013696827958310244, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.18159848717914429, "min": 0.006653847178888267, "max": 0.2017406280115121, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.5703403337285735e-06, "min": 7.5703403337285735e-06, "max": 0.00029515063018788575, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00010598476467220002, "min": 0.00010598476467220002, "max": 0.0036331990889337, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.1025234142857143, "min": 0.1025234142857143, "max": 0.19838354285714285, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4353278000000003, "min": 1.3691136000000002, "max": 2.6110663000000005, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.0002620890871428573, "min": 0.0002620890871428573, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.0036692472200000018, "min": 0.0036692472200000018, "max": 0.12112552337, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.01062154583632946, "min": 0.01062154583632946, "max": 0.3217851221561432, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.14870163798332214, "min": 0.14870163798332214, "max": 2.252495765686035, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 410.82608695652175, "min": 383.49367088607596, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 28347.0, "min": 15984.0, "max": 34175.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.4708176258513157, "min": -1.0000000521540642, "max": 1.5148399807016055, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 100.01559855788946, "min": -32.000001668930054, "max": 121.11479805409908, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.4708176258513157, "min": -1.0000000521540642, "max": 1.5148399807016055, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 100.01559855788946, "min": -32.000001668930054, "max": 121.11479805409908, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 
0.04582691469198942, "min": 0.04509447946302316, "max": 6.73990511149168, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 3.1162301990552805, "min": 3.1162301990552805, "max": 107.83848178386688, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1679333537", "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1679335621" }, "total": 2083.614686453, "count": 1, "self": 0.803465302999939, "children": { "run_training.setup": { "total": 0.10485913100001198, "count": 1, "self": 0.10485913100001198 }, "TrainerController.start_learning": { "total": 2082.706362019, "count": 1, "self": 1.2908663250382233, "children": { "TrainerController._reset_env": { "total": 9.41941091199999, "count": 1, "self": 9.41941091199999 }, "TrainerController.advance": { "total": 2071.8587819369623, "count": 63649, "self": 1.4642750949569745, "children": { "env_step": { "total": 1441.4127156380073, "count": 63649, "self": 1335.8806393190698, "children": { "SubprocessEnvManager._take_step": { "total": 104.75996937597449, "count": 63649, "self": 4.505048939957362, "children": { "TorchPolicy.evaluate": { "total": 100.25492043601713, "count": 62570, "self": 100.25492043601713 } } }, "workers": { "total": 0.7721069429630347, "count": 63649, "self": 0.0, "children": { "worker_root": { "total": 2078.030785964965, "count": 63649, "is_parallel": true, "self": 853.8231536009669, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.005757275999997091, "count": 1, "is_parallel": true, "self": 0.004350639999984196, "children": { "_process_rank_one_or_two_observation": { "total": 0.0014066360000128952, "count": 8, "is_parallel": true, "self": 0.0014066360000128952 } } }, "UnityEnvironment.step": { "total": 0.058280125999999655, "count": 1, "is_parallel": true, "self": 0.0005637059998662153, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0004228080000530099, "count": 1, "is_parallel": true, "self": 0.0004228080000530099 }, "communicator.exchange": { "total": 0.05554716800003234, "count": 1, "is_parallel": true, "self": 0.05554716800003234 }, "steps_from_proto": { "total": 0.0017464440000480863, "count": 1, "is_parallel": true, "self": 0.00038079900002685463, "children": { "_process_rank_one_or_two_observation": { "total": 0.0013656450000212317, "count": 8, "is_parallel": true, "self": 0.0013656450000212317 } } } } } } }, "UnityEnvironment.step": { "total": 1224.2076323639983, "count": 63648, "is_parallel": true, "self": 31.028065578996802, "children": { "UnityEnvironment._generate_step_input": { "total": 22.502084424017653, "count": 63648, "is_parallel": true, "self": 22.502084424017653 }, "communicator.exchange": { "total": 1080.1595737529838, "count": 63648, "is_parallel": true, "self": 1080.1595737529838 }, "steps_from_proto": { "total": 90.51790860800031, "count": 63648, "is_parallel": true, "self": 
18.998207732006563, "children": { "_process_rank_one_or_two_observation": { "total": 71.51970087599375, "count": 509184, "is_parallel": true, "self": 71.51970087599375 } } } } } } } } } } }, "trainer_advance": { "total": 628.981791203998, "count": 63649, "self": 2.411050936017091, "children": { "process_trajectory": { "total": 114.72007029098398, "count": 63649, "self": 114.45804867798387, "children": { "RLTrainer._checkpoint": { "total": 0.2620216130001154, "count": 2, "self": 0.2620216130001154 } } }, "_update_policy": { "total": 511.850669976997, "count": 451, "self": 325.08055114102694, "children": { "TorchPPOOptimizer.update": { "total": 186.77011883597004, "count": 22860, "self": 186.77011883597004 } } } } } } }, "trainer_threads": { "total": 1.207999957841821e-06, "count": 1, "self": 1.207999957841821e-06 }, "TrainerController._save_models": { "total": 0.13730163699983677, "count": 1, "self": 0.0018982979995598726, "children": { "RLTrainer._checkpoint": { "total": 0.1354033390002769, "count": 1, "self": 0.1354033390002769 } } } } } } }
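The dump above is the gauge and timer summary that `mlagents-learn` writes at the end of a run: each entry under "gauges" records the last, minimum, and maximum value of a training statistic over the 33 summary periods, and the nested "children" tree records where the wall-clock time went. Below is a minimal reading sketch, not part of ML-Agents itself; it assumes the dump is saved as `run_logs/timers.json` (the path is an assumption, adjust it to wherever this file actually lives) and only relies on the keys visible above.

```python
import json

def walk_timers(node, name="root", depth=0):
    """Recursively print the timer tree: seconds spent and call counts per block."""
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.3f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child, child_name, depth + 1)

if __name__ == "__main__":
    # Assumed location of the dump shown above; change if your run logs live elsewhere.
    with open("run_logs/timers.json") as f:
        stats = json.load(f)

    # Last/min/max of each gauge (entropy, losses, cumulative reward, RND reward, ...).
    for metric, gauge in stats["gauges"].items():
        print(f"{metric}: last={gauge['value']:.4f} "
              f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

    # Wall-clock breakdown of the training run.
    walk_timers(stats)
```

Run against this dump, the timer walk makes it easy to see that most of the roughly 2084 seconds reported at the root went to `communicator.exchange` (the Unity environment stepping) and to `_update_policy` (the PPO update), which is the usual split for this trainer.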