{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6309221386909485,
"min": 0.5517802238464355,
"max": 1.4528251886367798,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18826.716796875,
"min": 16588.720703125,
"max": 44072.90625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989969.0,
"min": 29922.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989969.0,
"min": 29922.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3538196086883545,
"min": -0.11884379386901855,
"max": 0.3822344243526459,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 93.05455780029297,
"min": -28.641353607177734,
"max": 102.05659484863281,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 11.376840591430664,
"min": -1.3340771198272705,
"max": 11.376840591430664,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2992.109130859375,
"min": -349.5281982421875,
"max": 2992.109130859375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06857131932132388,
"min": 0.06490586224910137,
"max": 0.0771923413734024,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9599984704985343,
"min": 0.6175387309872192,
"max": 1.0512245996667227,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 15.447681779934076,
"min": 0.00013634601190398436,
"max": 15.447681779934076,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 216.26754491907707,
"min": 0.0019088441666557812,
"max": 216.26754491907707,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.476076079435708e-06,
"min": 7.476076079435708e-06,
"max": 0.0002947656392447875,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010466506511209992,
"min": 0.00010466506511209992,
"max": 0.003633341888886099,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249199285714285,
"min": 0.10249199285714285,
"max": 0.1982552125,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348878999999999,
"min": 1.4348878999999999,
"max": 2.6111139000000008,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025895008642857126,
"min": 0.00025895008642857126,
"max": 0.00982569572875,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036253012099999975,
"min": 0.0036253012099999975,
"max": 0.12113027861,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015366087667644024,
"min": 0.014959233812987804,
"max": 0.5141723155975342,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21512523293495178,
"min": 0.2094292789697647,
"max": 4.113378524780273,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 511.0483870967742,
"min": 468.8,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31685.0,
"min": 17360.0,
"max": 33010.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2566819377854221,
"min": -0.9998774715969639,
"max": 1.3660644888877869,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 76.65759820491076,
"min": -30.996201619505882,
"max": 87.52459850907326,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2566819377854221,
"min": -0.9998774715969639,
"max": 1.3660644888877869,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 76.65759820491076,
"min": -30.996201619505882,
"max": 87.52459850907326,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08181005848579292,
"min": 0.07692921549799697,
"max": 9.933180696641406,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.990413567633368,
"min": 4.528399441449437,
"max": 178.7972525395453,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1726196157",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1726197912"
},
"total": 1755.266822136,
"count": 1,
"self": 0.3747607980003522,
"children": {
"run_training.setup": {
"total": 0.05434367299949372,
"count": 1,
"self": 0.05434367299949372
},
"TrainerController.start_learning": {
"total": 1754.8377176650001,
"count": 1,
"self": 1.5318910600963136,
"children": {
"TrainerController._reset_env": {
"total": 1.7687151840000297,
"count": 1,
"self": 1.7687151840000297
},
"TrainerController.advance": {
"total": 1751.4445529459026,
"count": 63488,
"self": 1.4111253877072159,
"children": {
"env_step": {
"total": 1107.4984813761184,
"count": 63488,
"self": 949.908416633848,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.65711663003913,
"count": 63488,
"self": 4.598251203101427,
"children": {
"TorchPolicy.evaluate": {
"total": 152.0588654269377,
"count": 62559,
"self": 152.0588654269377
}
}
},
"workers": {
"total": 0.9329481122313155,
"count": 63488,
"self": 0.0,
"children": {
"worker_root": {
"total": 1752.5316851629177,
"count": 63488,
"is_parallel": true,
"self": 907.5702652098689,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021440210002765525,
"count": 1,
"is_parallel": true,
"self": 0.0007046719993013539,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014393490009751986,
"count": 8,
"is_parallel": true,
"self": 0.0014393490009751986
}
}
},
"UnityEnvironment.step": {
"total": 0.03786469100032264,
"count": 1,
"is_parallel": true,
"self": 0.0004513160010901629,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003284349995738012,
"count": 1,
"is_parallel": true,
"self": 0.0003284349995738012
},
"communicator.exchange": {
"total": 0.03581220199976087,
"count": 1,
"is_parallel": true,
"self": 0.03581220199976087
},
"steps_from_proto": {
"total": 0.0012727379998977995,
"count": 1,
"is_parallel": true,
"self": 0.0002801259988700622,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009926120010277373,
"count": 8,
"is_parallel": true,
"self": 0.0009926120010277373
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 844.9614199530488,
"count": 63487,
"is_parallel": true,
"self": 22.90857045895882,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.261316163030642,
"count": 63487,
"is_parallel": true,
"self": 14.261316163030642
},
"communicator.exchange": {
"total": 742.4403083370689,
"count": 63487,
"is_parallel": true,
"self": 742.4403083370689
},
"steps_from_proto": {
"total": 65.3512249939904,
"count": 63487,
"is_parallel": true,
"self": 14.321769730003325,
"children": {
"_process_rank_one_or_two_observation": {
"total": 51.029455263987074,
"count": 507896,
"is_parallel": true,
"self": 51.029455263987074
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 642.534946182077,
"count": 63488,
"self": 2.749910809024186,
"children": {
"process_trajectory": {
"total": 120.78061354705642,
"count": 63488,
"self": 120.57818172905718,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2024318179992406,
"count": 2,
"self": 0.2024318179992406
}
}
},
"_update_policy": {
"total": 519.0044218259964,
"count": 457,
"self": 288.1165769280642,
"children": {
"TorchPPOOptimizer.update": {
"total": 230.88784489793215,
"count": 22767,
"self": 230.88784489793215
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2090004020137712e-06,
"count": 1,
"self": 1.2090004020137712e-06
},
"TrainerController._save_models": {
"total": 0.09255726600076741,
"count": 1,
"self": 0.0014655570003014873,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09109170900046593,
"count": 1,
"self": 0.09109170900046593
}
}
}
}
}
}
}