{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4067370891571045, "min": 1.4067370891571045, "max": 1.426994800567627, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69045.46875, "min": 69045.46875, "max": 76912.9296875, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 84.12947189097103, "min": 78.64331210191082, "max": 405.6178861788618, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49384.0, "min": 48711.0, "max": 50190.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999967.0, "min": 49777.0, "max": 1999967.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999967.0, "min": 49777.0, "max": 1999967.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4909980297088623, "min": 0.006977755110710859, "max": 2.4909980297088623, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1462.2158203125, "min": 0.8512861132621765, "max": 1508.9130859375, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.940634371373276, "min": 1.758558879927045, "max": 3.987925786396553, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2313.152375996113, "min": 214.5441833510995, "max": 2326.1923151016235, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.940634371373276, "min": 1.758558879927045, "max": 3.987925786396553, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2313.152375996113, "min": 214.5441833510995, "max": 2326.1923151016235, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.014846888736873452, "min": 0.011563819564778694, "max": 0.02019818467864146, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.044540666210620354, "min": 0.02871656273055123, "max": 0.060594554035924376, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05978891435596678, "min": 0.02295945438866814, "max": 0.06050440731147925, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.17936674306790035, "min": 0.04591890877733628, "max": 0.18151322193443775, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.482398839233338e-06, "min": 3.482398839233338e-06, "max": 0.00029528992657002493, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.0447196517700015e-05, "min": 1.0447196517700015e-05, "max": 0.0008437954687348499, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10116076666666667, "min": 0.10116076666666667, "max": 0.19842997500000006, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3034823, "min": 0.20746140000000002, "max": 0.5812651500000001, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.792225666666675e-05, "min": 6.792225666666675e-05, "max": 0.0049216557524999986, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00020376677000000026, "min": 0.00020376677000000026, "max": 0.014065130985000004, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1671804015", "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": 
"1671806157" }, "total": 2142.183894565, "count": 1, "self": 0.3904891930001213, "children": { "run_training.setup": { "total": 0.10464533399999709, "count": 1, "self": 0.10464533399999709 }, "TrainerController.start_learning": { "total": 2141.688760038, "count": 1, "self": 3.777357831005247, "children": { "TrainerController._reset_env": { "total": 7.584603255000047, "count": 1, "self": 7.584603255000047 }, "TrainerController.advance": { "total": 2130.2127204489952, "count": 232364, "self": 4.093682628999886, "children": { "env_step": { "total": 1668.9810511449157, "count": 232364, "self": 1401.8518345910045, "children": { "SubprocessEnvManager._take_step": { "total": 264.6395726289147, "count": 232364, "self": 13.714421345018764, "children": { "TorchPolicy.evaluate": { "total": 250.92515128389596, "count": 222836, "self": 62.38086275392038, "children": { "TorchPolicy.sample_actions": { "total": 188.54428852997557, "count": 222836, "self": 188.54428852997557 } } } } }, "workers": { "total": 2.4896439249964715, "count": 232364, "self": 0.0, "children": { "worker_root": { "total": 2133.947571688959, "count": 232364, "is_parallel": true, "self": 985.3135514839059, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.001996748999999909, "count": 1, "is_parallel": true, "self": 0.00036290299976826645, "children": { "_process_rank_one_or_two_observation": { "total": 0.0016338460002316424, "count": 2, "is_parallel": true, "self": 0.0016338460002316424 } } }, "UnityEnvironment.step": { "total": 0.028696611999976085, "count": 1, "is_parallel": true, "self": 0.0002803699999276432, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00017002499998852727, "count": 1, "is_parallel": true, "self": 0.00017002499998852727 }, "communicator.exchange": { "total": 0.027534660999890548, "count": 1, "is_parallel": true, "self": 0.027534660999890548 }, "steps_from_proto": { "total": 0.0007115560001693666, "count": 1, "is_parallel": true, "self": 0.0002457760001561837, "children": { "_process_rank_one_or_two_observation": { "total": 0.00046578000001318287, "count": 2, "is_parallel": true, "self": 0.00046578000001318287 } } } } } } }, "UnityEnvironment.step": { "total": 1148.6340202050533, "count": 232363, "is_parallel": true, "self": 33.6633536530278, "children": { "UnityEnvironment._generate_step_input": { "total": 74.43750417795536, "count": 232363, "is_parallel": true, "self": 74.43750417795536 }, "communicator.exchange": { "total": 950.4464918921119, "count": 232363, "is_parallel": true, "self": 950.4464918921119 }, "steps_from_proto": { "total": 90.08667048195821, "count": 232363, "is_parallel": true, "self": 36.73845348303985, "children": { "_process_rank_one_or_two_observation": { "total": 53.34821699891836, "count": 464726, "is_parallel": true, "self": 53.34821699891836 } } } } } } } } } } }, "trainer_advance": { "total": 457.1379866750797, "count": 232364, "self": 6.0412287139108685, "children": { "process_trajectory": { "total": 144.2575135721686, "count": 232364, "self": 142.97054820816857, "children": { "RLTrainer._checkpoint": { "total": 1.2869653640000251, "count": 10, "self": 1.2869653640000251 } } }, "_update_policy": { "total": 306.8392443890002, "count": 97, "self": 253.10292349499377, "children": { "TorchPPOOptimizer.update": { "total": 53.73632089400644, "count": 2910, "self": 53.73632089400644 } } } } } } }, "trainer_threads": { "total": 8.399997568631079e-07, "count": 1, "self": 
8.399997568631079e-07 }, "TrainerController._save_models": { "total": 0.11407766299998912, "count": 1, "self": 0.0018953809999402438, "children": { "RLTrainer._checkpoint": { "total": 0.11218228200004887, "count": 1, "self": 0.11218228200004887 } } } } } } }