{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.16631320118904114, "min": 0.15615378320217133, "max": 0.5242581367492676, "count": 67 }, "Pyramids.Policy.Entropy.sum": { "value": 4960.125, "min": 4648.802734375, "max": 15685.8037109375, "count": 67 }, "Pyramids.Step.mean": { "value": 2999900.0, "min": 1019969.0, "max": 2999900.0, "count": 67 }, "Pyramids.Step.sum": { "value": 2999900.0, "min": 1019969.0, "max": 2999900.0, "count": 67 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.7526808977127075, "min": 0.3403772711753845, "max": 0.8132508993148804, "count": 67 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 222.04086303710938, "min": 88.83847045898438, "max": 243.16201782226562, "count": 67 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": -0.015379424206912518, "min": -0.01909688487648964, "max": 0.06433786451816559, "count": 67 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": -4.536930084228516, "min": -4.536930084228516, "max": 17.37122344970703, "count": 67 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 267.4188034188034, "min": 219.87596899224806, "max": 531.948275862069, "count": 67 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 31288.0, "min": 18218.0, "max": 32184.0, "count": 67 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.7154820268480186, "min": 1.1231103220890308, "max": 1.7808692251260465, "count": 67 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 200.71139714121819, "min": 65.14039868116379, "max": 231.7359987795353, "count": 67 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.7154820268480186, "min": 1.1231103220890308, "max": 1.7808692251260465, "count": 67 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 200.71139714121819, "min": 65.14039868116379, "max": 231.7359987795353, "count": 67 }, "Pyramids.Policy.RndReward.mean": { "value": 0.01874066329833307, "min": 0.01625763152926363, "max": 0.051344343154000695, "count": 67 }, "Pyramids.Policy.RndReward.sum": { "value": 2.1926576059049694, "min": 1.8848839916317957, "max": 3.2472658004262485, "count": 67 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.07115753559905678, "min": 0.06439512960310485, "max": 0.07460549737852826, "count": 67 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9962054983867951, "min": 0.6714494764067543, "max": 1.0704061636865578, "count": 67 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.0143861130879183, "min": 0.010200310676979523, "max": 0.01712145668058102, "count": 67 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.2014055832308562, "min": 0.11399055966709662, "max": 0.2409040310788744, "count": 67 }, "Pyramids.Policy.LearningRate.mean": { "value": 1.4785280786190503e-06, "min": 1.4785280786190503e-06, "max": 0.00019894532257379261, "count": 67 }, "Pyramids.Policy.LearningRate.sum": { "value": 2.0699393100666706e-05, "min": 2.0699393100666706e-05, "max": 0.00281256056248, "count": 67 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.1004928095238095, "min": 0.1004928095238095, "max": 0.16631509629629632, "count": 67 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.406899333333333, "min": 1.406899333333333, "max": 2.4375199999999997, "count": 67 }, "Pyramids.Policy.Beta.mean": { "value": 5.9231671428571524e-05, "min": 5.9231671428571524e-05, "max": 0.00663487812, "count": 67 }, "Pyramids.Policy.Beta.sum": { "value": 0.0008292434000000013, "min": 0.0008292434000000013, "max": 0.093808248, "count": 67 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.006910291966050863, 
"min": 0.006267464254051447, "max": 0.01137463003396988, "count": 67 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.09674409031867981, "min": 0.08774449676275253, "max": 0.15356534719467163, "count": 67 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 67 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 67 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1698957740", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.1.0+cu118", "numpy_version": "1.23.5", "end_time_seconds": "1698960820" }, "total": 3079.4064024080003, "count": 1, "self": 0.32205639800076824, "children": { "run_training.setup": { "total": 0.043957591999969736, "count": 1, "self": 0.043957591999969736 }, "TrainerController.start_learning": { "total": 3079.0403884179996, "count": 1, "self": 2.1447504499651586, "children": { "TrainerController._reset_env": { "total": 3.3851331480000226, "count": 1, "self": 3.3851331480000226 }, "TrainerController.advance": { "total": 3073.441244140034, "count": 130477, "self": 2.250978221929472, "children": { "env_step": { "total": 2037.8935502071477, "count": 130477, "self": 1827.5517615943077, "children": { "SubprocessEnvManager._take_step": { "total": 209.03697606190553, "count": 130477, "self": 7.80014143979497, "children": { "TorchPolicy.evaluate": { "total": 201.23683462211056, "count": 125048, "self": 201.23683462211056 } } }, "workers": { "total": 1.3048125509344572, "count": 130477, "self": 0.0, "children": { "worker_root": { "total": 3075.507062067016, "count": 130477, "is_parallel": true, "self": 1409.19582373993, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0017288639996877464, "count": 1, "is_parallel": true, "self": 0.0005731000001105713, "children": { "_process_rank_one_or_two_observation": { "total": 0.0011557639995771751, "count": 8, "is_parallel": true, "self": 0.0011557639995771751 } } }, "UnityEnvironment.step": { "total": 0.035860842000147386, "count": 1, "is_parallel": true, "self": 0.0003879749992847792, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0003228530003980268, "count": 1, "is_parallel": true, "self": 0.0003228530003980268 }, "communicator.exchange": { "total": 0.03405229000009058, "count": 1, "is_parallel": true, "self": 0.03405229000009058 }, "steps_from_proto": { "total": 0.0010977240003740008, "count": 1, "is_parallel": true, "self": 0.0002821739994942618, "children": { "_process_rank_one_or_two_observation": { "total": 0.0008155500008797389, "count": 8, "is_parallel": true, "self": 0.0008155500008797389 } } } } } } }, "UnityEnvironment.step": { "total": 1666.311238327086, "count": 130476, "is_parallel": true, "self": 39.75793157228463, "children": { "UnityEnvironment._generate_step_input": { "total": 27.65938782695821, "count": 130476, "is_parallel": true, "self": 27.65938782695821 }, "communicator.exchange": { "total": 1485.3678151138424, "count": 130476, "is_parallel": true, "self": 1485.3678151138424 }, "steps_from_proto": { "total": 113.52610381400063, "count": 130476, "is_parallel": true, "self": 
24.249074466750244, "children": { "_process_rank_one_or_two_observation": { "total": 89.27702934725039, "count": 1043808, "is_parallel": true, "self": 89.27702934725039 } } } } } } } } } } }, "trainer_advance": { "total": 1033.296715710957, "count": 130477, "self": 4.452361167000618, "children": { "process_trajectory": { "total": 201.96153584894728, "count": 130477, "self": 201.66451809094724, "children": { "RLTrainer._checkpoint": { "total": 0.29701775800003816, "count": 4, "self": 0.29701775800003816 } } }, "_update_policy": { "total": 826.8828186950091, "count": 946, "self": 482.3568385889894, "children": { "TorchPPOOptimizer.update": { "total": 344.5259801060197, "count": 45510, "self": 344.5259801060197 } } } } } } }, "trainer_threads": { "total": 9.90999978967011e-07, "count": 1, "self": 9.90999978967011e-07 }, "TrainerController._save_models": { "total": 0.06925968900031876, "count": 1, "self": 0.0016841140004544286, "children": { "RLTrainer._checkpoint": { "total": 0.06757557499986433, "count": 1, "self": 0.06757557499986433 } } } } } } }
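
The object above is the gauge and timer dump that ML-Agents produces at the end of a run: each entry under "gauges" records the latest value plus the min, max, and count observed over the run, "metadata" captures versions and the launch command, and the remaining keys form the hierarchical wall-clock timer tree. As a convenience, the sketch below shows one way to load this JSON and print a quick summary of the gauges together with the recorded run duration. It is a minimal sketch, assuming the dump is saved as run_logs/timers.json; that path is an assumption, not part of the data above, so adjust it to wherever the file actually lives.

import json
from pathlib import Path

# Assumed location of the JSON dump shown above; adjust as needed.
TIMERS_PATH = Path("run_logs/timers.json")

def summarize(timers_path: Path) -> None:
    data = json.loads(timers_path.read_text())

    # Each gauge stores the latest value plus min/max/count over the run.
    print(f"{'gauge':<50} {'value':>14} {'min':>14} {'max':>14}")
    for name, gauge in data.get("gauges", {}).items():
        print(f"{name:<50} {gauge['value']:>14.6g} {gauge['min']:>14.6g} {gauge['max']:>14.6g}")

    # Run metadata: library versions and wall-clock duration of the run.
    meta = data.get("metadata", {})
    duration = int(meta["end_time_seconds"]) - int(meta["start_time_seconds"])
    print(f"\nmlagents {meta.get('mlagents_version')}, torch {meta.get('pytorch_version')}")
    print(f"wall clock: {duration} s, root timer total: {data.get('total', 0.0):.1f} s")

if __name__ == "__main__":
    summarize(TIMERS_PATH)

Running this against the dump above would, for example, report the final Pyramids.Environment.CumulativeReward.mean of about 1.72 (min 1.12, max 1.78 across the 67 recorded summaries) and a wall-clock duration of roughly 3080 seconds, matching the root timer total.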