{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39683642983436584,
"min": 0.3928750157356262,
"max": 1.4675036668777466,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11835.25,
"min": 11835.25,
"max": 44518.19140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989967.0,
"min": 29952.0,
"max": 989967.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989967.0,
"min": 29952.0,
"max": 989967.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5897131562232971,
"min": -0.09040573984384537,
"max": 0.66984623670578,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 166.29910278320312,
"min": -21.697378158569336,
"max": 192.91571044921875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0012653418816626072,
"min": -0.01722562499344349,
"max": 0.2303726077079773,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.35682642459869385,
"min": -4.8059492111206055,
"max": 55.519798278808594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07149178699790161,
"min": 0.06583988447391549,
"max": 0.0742858538772684,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0008850179706226,
"min": 0.5143103057962852,
"max": 1.0802477465961902,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01706551619489411,
"min": 0.0005108097153346259,
"max": 0.017465132610774235,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23891722672851756,
"min": 0.005618906868680886,
"max": 0.24599706401932053,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.454326086685718e-06,
"min": 7.454326086685718e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010436056521360005,
"min": 0.00010436056521360005,
"max": 0.0033805418731528,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248474285714286,
"min": 0.10248474285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347864000000001,
"min": 1.3886848,
"max": 2.527710000000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002582258114285716,
"min": 0.0002582258114285716,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036151613600000024,
"min": 0.0036151613600000024,
"max": 0.11271203528000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007738643325865269,
"min": 0.007738643325865269,
"max": 0.4328215420246124,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10834100842475891,
"min": 0.10834100842475891,
"max": 3.0297508239746094,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 311.61,
"min": 277.0183486238532,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31161.0,
"min": 15984.0,
"max": 32779.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6469656342659333,
"min": -1.0000000521540642,
"max": 1.72212035636659,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 163.0495977923274,
"min": -30.997401610016823,
"max": 185.98899848759174,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6469656342659333,
"min": -1.0000000521540642,
"max": 1.72212035636659,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 163.0495977923274,
"min": -30.997401610016823,
"max": 185.98899848759174,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02485793294806052,
"min": 0.023075643367239815,
"max": 8.637586353346705,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4609353618579917,
"min": 2.3624375510262325,
"max": 138.2013816535473,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1736268063",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1736270708"
},
"total": 2644.7429433419998,
"count": 1,
"self": 0.9680932010001015,
"children": {
"run_training.setup": {
"total": 0.0611510090000138,
"count": 1,
"self": 0.0611510090000138
},
"TrainerController.start_learning": {
"total": 2643.713699132,
"count": 1,
"self": 1.9600675859774128,
"children": {
"TrainerController._reset_env": {
"total": 6.391272173999994,
"count": 1,
"self": 6.391272173999994
},
"TrainerController.advance": {
"total": 2635.215365804022,
"count": 64140,
"self": 1.969712547002473,
"children": {
"env_step": {
"total": 1877.9813384169938,
"count": 64140,
"self": 1687.8067575290456,
"children": {
"SubprocessEnvManager._take_step": {
"total": 189.00242469096472,
"count": 64140,
"self": 5.807337278982544,
"children": {
"TorchPolicy.evaluate": {
"total": 183.19508741198217,
"count": 62558,
"self": 183.19508741198217
}
}
},
"workers": {
"total": 1.1721561969833942,
"count": 64140,
"self": 0.0,
"children": {
"worker_root": {
"total": 2636.979889072022,
"count": 64140,
"is_parallel": true,
"self": 1099.6038553030262,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002299437999994325,
"count": 1,
"is_parallel": true,
"self": 0.0007093689999635444,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015900690000307804,
"count": 8,
"is_parallel": true,
"self": 0.0015900690000307804
}
}
},
"UnityEnvironment.step": {
"total": 0.05736539199997992,
"count": 1,
"is_parallel": true,
"self": 0.000805590999988226,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005277100000000701,
"count": 1,
"is_parallel": true,
"self": 0.0005277100000000701
},
"communicator.exchange": {
"total": 0.05374838299999851,
"count": 1,
"is_parallel": true,
"self": 0.05374838299999851
},
"steps_from_proto": {
"total": 0.002283707999993112,
"count": 1,
"is_parallel": true,
"self": 0.0006034959999681178,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016802120000249943,
"count": 8,
"is_parallel": true,
"self": 0.0016802120000249943
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1537.376033768996,
"count": 64139,
"is_parallel": true,
"self": 40.401838995957405,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.033825481048154,
"count": 64139,
"is_parallel": true,
"self": 27.033825481048154
},
"communicator.exchange": {
"total": 1352.773409651984,
"count": 64139,
"is_parallel": true,
"self": 1352.773409651984
},
"steps_from_proto": {
"total": 117.16695964000652,
"count": 64139,
"is_parallel": true,
"self": 25.525550018082043,
"children": {
"_process_rank_one_or_two_observation": {
"total": 91.64140962192448,
"count": 513112,
"is_parallel": true,
"self": 91.64140962192448
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 755.2643148400258,
"count": 64140,
"self": 3.74536719001037,
"children": {
"process_trajectory": {
"total": 152.2606718050134,
"count": 64140,
"self": 151.89821834101312,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3624534640002821,
"count": 2,
"self": 0.3624534640002821
}
}
},
"_update_policy": {
"total": 599.258275845002,
"count": 455,
"self": 335.59588270299594,
"children": {
"TorchPPOOptimizer.update": {
"total": 263.66239314200607,
"count": 22824,
"self": 263.66239314200607
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6520002645847853e-06,
"count": 1,
"self": 1.6520002645847853e-06
},
"TrainerController._save_models": {
"total": 0.1469919160003883,
"count": 1,
"self": 0.002363842000704608,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1446280739996837,
"count": 1,
"self": 0.1446280739996837
}
}
}
}
}
}
}