{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.7056046724319458, "min": 0.6583653092384338, "max": 0.864764392375946, "count": 13 }, "Pyramids.Policy.Entropy.sum": { "value": 21021.375, "min": 13230.509765625, "max": 25915.259765625, "count": 13 }, "Pyramids.Step.mean": { "value": 479900.0, "min": 119893.0, "max": 479900.0, "count": 13 }, "Pyramids.Step.sum": { "value": 479900.0, "min": 119893.0, "max": 479900.0, "count": 13 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": -0.05161866545677185, "min": -0.09998025000095367, "max": -0.024935798719525337, "count": 13 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": -12.543335914611816, "min": -24.195220947265625, "max": -6.034463405609131, "count": 13 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.04084056615829468, "min": 0.036658503115177155, "max": 0.14763811230659485, "count": 13 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 9.924257278442383, "min": 8.908016204833984, "max": 29.460866928100586, "count": 13 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06987683399543244, "min": 0.0643047431407396, "max": 0.07267828293145887, "count": 13 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9782756759360542, "min": 0.4169106374361111, "max": 1.0174959610404242, "count": 13 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.002951446809155888, "min": 0.0009379414693428374, "max": 0.004382461116580478, "count": 13 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.04132025532818243, "min": 0.012275871942457188, "max": 0.04132025532818243, "count": 13 }, "Pyramids.Policy.LearningRate.mean": { "value": 2.1505849974271425e-05, "min": 2.1505849974271425e-05, "max": 0.0002330925223025, "count": 13 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00030108189963979997, "min": 0.00030108189963979997, "max": 0.0026326749224418, "count": 13 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10716858571428571, "min": 0.10716858571428571, "max": 0.17769749999999998, "count": 13 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.5003602, "min": 1.066185, "max": 2.1708439999999998, "count": 13 }, "Pyramids.Policy.Beta.mean": { "value": 0.000726141712857143, "min": 0.000726141712857143, "max": 0.00777198025, "count": 13 }, "Pyramids.Policy.Beta.sum": { "value": 0.010165983980000003, "min": 0.010165983980000003, "max": 0.08778806418, "count": 13 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.035825785249471664, "min": 0.035825785249471664, "max": 0.1192840039730072, "count": 13 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.5015609860420227, "min": 0.5015609860420227, "max": 1.1114671230316162, "count": 13 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 951.258064516129, "min": 886.2, "max": 981.9677419354839, "count": 13 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 29489.0, "min": 15241.0, "max": 32866.0, "count": 13 }, "Pyramids.Environment.CumulativeReward.mean": { "value": -0.5001677933239168, "min": -0.7891161785491051, "max": -0.5001677933239168, "count": 13 }, "Pyramids.Environment.CumulativeReward.sum": { "value": -15.50520159304142, "min": -25.175801686942577, "max": -11.255000829696655, "count": 13 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": -0.5001677933239168, "min": -0.7891161785491051, "max": -0.5001677933239168, "count": 13 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": -15.50520159304142, "min": -25.175801686942577, "max": -11.255000829696655, "count": 13 }, "Pyramids.Policy.RndReward.mean": { "value": 0.3513112754651135, "min": 
0.34339476924276713, "max": 1.1966151699889451, "count": 13 }, "Pyramids.Policy.RndReward.sum": { "value": 10.890649539418519, "min": 10.890649539418519, "max": 30.938270511105657, "count": 13 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 13 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 13 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1705239387", "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --resume --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.1.2+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1705240397" }, "total": 1010.7697038460001, "count": 1, "self": 0.5343345270002828, "children": { "run_training.setup": { "total": 0.07936862899987318, "count": 1, "self": 0.07936862899987318 }, "TrainerController.start_learning": { "total": 1010.1560006899999, "count": 1, "self": 0.8612357779361446, "children": { "TrainerController._reset_env": { "total": 2.5432331020001584, "count": 1, "self": 2.5432331020001584 }, "TrainerController.advance": { "total": 1006.6557438700638, "count": 25284, "self": 0.8797703000514048, "children": { "env_step": { "total": 716.3736337799842, "count": 25284, "self": 646.0934776119288, "children": { "SubprocessEnvManager._take_step": { "total": 69.76227552601722, "count": 25284, "self": 2.4712620050158876, "children": { "TorchPolicy.evaluate": { "total": 67.29101352100133, "count": 25079, "self": 67.29101352100133 } } }, "workers": { "total": 0.5178806420381079, "count": 25284, "self": 0.0, "children": { "worker_root": { "total": 1007.5676659870073, "count": 25284, "is_parallel": true, "self": 424.2065156460362, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0019457329999568174, "count": 1, "is_parallel": true, "self": 0.0006246790003388014, "children": { "_process_rank_one_or_two_observation": { "total": 0.001321053999618016, "count": 8, "is_parallel": true, "self": 0.001321053999618016 } } }, "UnityEnvironment.step": { "total": 0.05678652799997508, "count": 1, "is_parallel": true, "self": 0.0006430970001929381, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0005156469997018576, "count": 1, "is_parallel": true, "self": 0.0005156469997018576 }, "communicator.exchange": { "total": 0.05363002100011727, "count": 1, "is_parallel": true, "self": 0.05363002100011727 }, "steps_from_proto": { "total": 0.0019977629999630153, "count": 1, "is_parallel": true, "self": 0.0004087940005774726, "children": { "_process_rank_one_or_two_observation": { "total": 0.0015889689993855427, "count": 8, "is_parallel": true, "self": 0.0015889689993855427 } } } } } } }, "UnityEnvironment.step": { "total": 583.3611503409711, "count": 25283, "is_parallel": true, "self": 17.19282746386625, "children": { "UnityEnvironment._generate_step_input": { "total": 12.224053933023242, "count": 25283, "is_parallel": true, "self": 12.224053933023242 }, "communicator.exchange": { "total": 503.96474366005714, "count": 25283, "is_parallel": true, "self": 503.96474366005714 }, "steps_from_proto": { "total": 49.979525284024476, "count": 25283, "is_parallel": true, "self": 10.445383648968345, 
"children": { "_process_rank_one_or_two_observation": { "total": 39.53414163505613, "count": 202264, "is_parallel": true, "self": 39.53414163505613 } } } } } } } } } } }, "trainer_advance": { "total": 289.40233979002824, "count": 25284, "self": 1.5130194670500714, "children": { "process_trajectory": { "total": 58.767308293973656, "count": 25284, "self": 58.64867308097382, "children": { "RLTrainer._checkpoint": { "total": 0.11863521299983404, "count": 1, "self": 0.11863521299983404 } } }, "_update_policy": { "total": 229.1220120290045, "count": 179, "self": 136.33589196801813, "children": { "TorchPPOOptimizer.update": { "total": 92.78612006098638, "count": 9111, "self": 92.78612006098638 } } } } } } }, "trainer_threads": { "total": 1.0459998520673253e-06, "count": 1, "self": 1.0459998520673253e-06 }, "TrainerController._save_models": { "total": 0.0957868939999571, "count": 1, "self": 0.0022664190000796225, "children": { "RLTrainer._checkpoint": { "total": 0.09352047499987748, "count": 1, "self": 0.09352047499987748 } } } } } } }