{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 0.8661801218986511, "min": 0.8661801218986511, "max": 2.8671863079071045, "count": 20 }, "SnowballTarget.Policy.Entropy.sum": { "value": 8260.759765625, "min": 8260.759765625, "max": 29331.31640625, "count": 20 }, "SnowballTarget.Step.mean": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Step.sum": { "value": 199984.0, "min": 9952.0, "max": 199984.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 12.57765007019043, "min": 0.3534684181213379, "max": 12.57765007019043, "count": 20 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 2452.641845703125, "min": 68.5728759765625, "max": 2551.47509765625, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 20 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 8756.0, "min": 8756.0, "max": 10945.0, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.06332117746016735, "min": 0.06224734195141126, "max": 0.07526549113276368, "count": 20 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.2532847098406694, "min": 0.24898936780564504, "max": 0.3641580444419364, "count": 20 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.1916163418368966, "min": 0.1183634707601467, "max": 0.27517908621652454, "count": 20 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 0.7664653673475864, "min": 0.4734538830405868, "max": 1.3470000390912973, "count": 20 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 8.082097306000005e-06, "min": 8.082097306000005e-06, "max": 0.000291882002706, "count": 20 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 3.232838922400002e-05, "min": 3.232838922400002e-05, "max": 0.00138516003828, "count": 20 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.10269400000000001, "min": 0.10269400000000001, "max": 0.19729400000000002, "count": 20 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.41077600000000003, "min": 0.41077600000000003, "max": 0.96172, "count": 20 }, "SnowballTarget.Policy.Beta.mean": { "value": 0.0001444306000000001, "min": 0.0001444306000000001, "max": 0.0048649706, "count": 20 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.0005777224000000004, "min": 0.0005777224000000004, "max": 0.023089828, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 24.727272727272727, "min": 3.2954545454545454, "max": 24.90909090909091, "count": 20 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 1088.0, "min": 145.0, "max": 1364.0, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 24.727272727272727, "min": 3.2954545454545454, "max": 24.90909090909091, "count": 20 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 1088.0, "min": 145.0, "max": 1364.0, "count": 20 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 20 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1689280363", "python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", 
"communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1689280822" }, "total": 459.4991406600001, "count": 1, "self": 0.7883720950001134, "children": { "run_training.setup": { "total": 0.04390275400001542, "count": 1, "self": 0.04390275400001542 }, "TrainerController.start_learning": { "total": 458.666865811, "count": 1, "self": 0.5404646510046405, "children": { "TrainerController._reset_env": { "total": 4.175219236000004, "count": 1, "self": 4.175219236000004 }, "TrainerController.advance": { "total": 453.7729951159954, "count": 18201, "self": 0.2497857179950529, "children": { "env_step": { "total": 453.52320939800035, "count": 18201, "self": 331.51856606400116, "children": { "SubprocessEnvManager._take_step": { "total": 121.74664670899952, "count": 18201, "self": 1.800205886001777, "children": { "TorchPolicy.evaluate": { "total": 119.94644082299774, "count": 18201, "self": 119.94644082299774 } } }, "workers": { "total": 0.257996624999663, "count": 18201, "self": 0.0, "children": { "worker_root": { "total": 457.1887970209993, "count": 18201, "is_parallel": true, "self": 217.23748615199753, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0054732780000108505, "count": 1, "is_parallel": true, "self": 0.004053217000006271, "children": { "_process_rank_one_or_two_observation": { "total": 0.00142006100000458, "count": 10, "is_parallel": true, "self": 0.00142006100000458 } } }, "UnityEnvironment.step": { "total": 0.0351252529999897, "count": 1, "is_parallel": true, "self": 0.0006155749999550153, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0003560390000245661, "count": 1, "is_parallel": true, "self": 0.0003560390000245661 }, "communicator.exchange": { "total": 0.03199006899998835, "count": 1, "is_parallel": true, "self": 0.03199006899998835 }, "steps_from_proto": { "total": 0.002163570000021764, "count": 1, "is_parallel": true, "self": 0.00035682399999359404, "children": { "_process_rank_one_or_two_observation": { "total": 0.0018067460000281699, "count": 10, "is_parallel": true, "self": 0.0018067460000281699 } } } } } } }, "UnityEnvironment.step": { "total": 239.95131086900176, "count": 18200, "is_parallel": true, "self": 10.243952093997933, "children": { "UnityEnvironment._generate_step_input": { "total": 5.0487758370030065, "count": 18200, "is_parallel": true, "self": 5.0487758370030065 }, "communicator.exchange": { "total": 190.0908911170006, "count": 18200, "is_parallel": true, "self": 190.0908911170006 }, "steps_from_proto": { "total": 34.56769182100021, "count": 18200, "is_parallel": true, "self": 6.074410556983594, "children": { "_process_rank_one_or_two_observation": { "total": 28.493281264016616, "count": 182000, "is_parallel": true, "self": 28.493281264016616 } } } } } } } } } } } } }, "trainer_threads": { "total": 0.00016039499996622908, "count": 1, "self": 0.00016039499996622908, "children": { "thread_root": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "trainer_advance": { "total": 450.3136188350118, "count": 426631, "is_parallel": true, "self": 9.168731385984245, "children": { "process_trajectory": { "total": 244.35880183302746, "count": 426631, "is_parallel": true, "self": 243.17181827702748, "children": { "RLTrainer._checkpoint": { "total": 1.1869835559999729, "count": 4, "is_parallel": true, "self": 1.1869835559999729 } } }, "_update_policy": { "total": 
196.7860856160001, "count": 90, "is_parallel": true, "self": 78.55267687899715, "children": { "TorchPPOOptimizer.update": { "total": 118.23340873700295, "count": 4587, "is_parallel": true, "self": 118.23340873700295 } } } } } } } } }, "TrainerController._save_models": { "total": 0.17802641299999777, "count": 1, "self": 0.001197604000026331, "children": { "RLTrainer._checkpoint": { "total": 0.17682880899997144, "count": 1, "self": 0.17682880899997144 } } } } } } }