{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3212706446647644,
"min": 0.3212706446647644,
"max": 1.5096741914749146,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9550.7333984375,
"min": 9550.7333984375,
"max": 45797.4765625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989928.0,
"min": 29952.0,
"max": 989928.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989928.0,
"min": 29952.0,
"max": 989928.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6312903761863708,
"min": -0.16419419646263123,
"max": 0.6736616492271423,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 176.13002014160156,
"min": -38.914024353027344,
"max": 191.9935760498047,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.015564392320811749,
"min": -0.004267923068255186,
"max": 0.29681316018104553,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.342465400695801,
"min": -1.1608750820159912,
"max": 71.53197479248047,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0682397596253958,
"min": 0.06557243978576957,
"max": 0.07390170244518487,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9553566347555412,
"min": 0.5173119171162941,
"max": 1.0963641298962208,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01397342407868941,
"min": 0.0011464143628346136,
"max": 0.015867200412968588,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19562793710165174,
"min": 0.013756972354015363,
"max": 0.2380080061945288,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.401868961314286e-06,
"min": 7.401868961314286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001036261654584,
"min": 0.0001036261654584,
"max": 0.0033834002721999995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246725714285713,
"min": 0.10246725714285713,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345416,
"min": 1.3886848,
"max": 2.5278,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002564789885714286,
"min": 0.0002564789885714286,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00359070584,
"min": 0.00359070584,
"max": 0.11280722,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009490794502198696,
"min": 0.009490794502198696,
"max": 0.42878207564353943,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1328711211681366,
"min": 0.1328711211681366,
"max": 3.001474618911743,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 325.3440860215054,
"min": 274.99056603773585,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30257.0,
"min": 15984.0,
"max": 32668.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6736956322970598,
"min": -1.0000000521540642,
"max": 1.681734635835827,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 153.9799981713295,
"min": -30.99680159986019,
"max": 178.47839824855328,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6736956322970598,
"min": -1.0000000521540642,
"max": 1.681734635835827,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 153.9799981713295,
"min": -30.99680159986019,
"max": 178.47839824855328,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03206343422299023,
"min": 0.028250009673386464,
"max": 8.413440052419901,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9498359485151013,
"min": 2.9498359485151013,
"max": 134.61504083871841,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724940279",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./ml-agents/training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids3 --no-graphics",
"mlagents_version": "1.0.0",
"mlagents_envs_version": "1.0.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.21.2",
"end_time_seconds": "1724942822"
},
"total": 2542.9706848959995,
"count": 1,
"self": 0.5320955819997835,
"children": {
"run_training.setup": {
"total": 0.08044538800004375,
"count": 1,
"self": 0.08044538800004375
},
"TrainerController.start_learning": {
"total": 2542.3581439259997,
"count": 1,
"self": 2.014526585981457,
"children": {
"TrainerController._reset_env": {
"total": 2.3941783719997147,
"count": 1,
"self": 2.3941783719997147
},
"TrainerController.advance": {
"total": 2537.862395350019,
"count": 64019,
"self": 1.9511561571130187,
"children": {
"env_step": {
"total": 1813.5757900169124,
"count": 64019,
"self": 1649.6543028220572,
"children": {
"SubprocessEnvManager._take_step": {
"total": 162.7489851848527,
"count": 64019,
"self": 5.96284653487146,
"children": {
"TorchPolicy.evaluate": {
"total": 156.78613864998124,
"count": 62564,
"self": 156.78613864998124
}
}
},
"workers": {
"total": 1.1725020100025176,
"count": 64019,
"self": 0.0,
"children": {
"worker_root": {
"total": 2535.9339761719843,
"count": 64019,
"is_parallel": true,
"self": 1026.545629238014,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029493209999600367,
"count": 1,
"is_parallel": true,
"self": 0.0008297760000459675,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002119544999914069,
"count": 8,
"is_parallel": true,
"self": 0.002119544999914069
}
}
},
"UnityEnvironment.step": {
"total": 0.054921843000101944,
"count": 1,
"is_parallel": true,
"self": 0.0005929150001975358,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005381959999795072,
"count": 1,
"is_parallel": true,
"self": 0.0005381959999795072
},
"communicator.exchange": {
"total": 0.05183892300010484,
"count": 1,
"is_parallel": true,
"self": 0.05183892300010484
},
"steps_from_proto": {
"total": 0.0019518089998200594,
"count": 1,
"is_parallel": true,
"self": 0.0004128480004510493,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00153896099936901,
"count": 8,
"is_parallel": true,
"self": 0.00153896099936901
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1509.3883469339703,
"count": 64018,
"is_parallel": true,
"self": 38.70039026799486,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.872592083902873,
"count": 64018,
"is_parallel": true,
"self": 27.872592083902873
},
"communicator.exchange": {
"total": 1313.9751875549614,
"count": 64018,
"is_parallel": true,
"self": 1313.9751875549614
},
"steps_from_proto": {
"total": 128.84017702711117,
"count": 64018,
"is_parallel": true,
"self": 26.545686797984672,
"children": {
"_process_rank_one_or_two_observation": {
"total": 102.2944902291265,
"count": 512144,
"is_parallel": true,
"self": 102.2944902291265
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 722.3354491759937,
"count": 64019,
"self": 3.561608478996277,
"children": {
"process_trajectory": {
"total": 139.56324720199927,
"count": 64019,
"self": 139.35028932899968,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21295787299959557,
"count": 2,
"self": 0.21295787299959557
}
}
},
"_update_policy": {
"total": 579.2105934949982,
"count": 452,
"self": 364.71019310702286,
"children": {
"TorchPPOOptimizer.update": {
"total": 214.50040038797533,
"count": 22821,
"self": 214.50040038797533
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0859994290512986e-06,
"count": 1,
"self": 1.0859994290512986e-06
},
"TrainerController._save_models": {
"total": 0.08704253199994127,
"count": 1,
"self": 0.0014544270006808802,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0855881049992604,
"count": 1,
"self": 0.0855881049992604
}
}
}
}
}
}
}