{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.2178698778152466,
"min": 0.978621244430542,
"max": 1.531394362449646,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 36302.265625,
"min": 29609.1640625,
"max": 46456.37890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989945.0,
"min": 29952.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989945.0,
"min": 29952.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09416185319423676,
"min": -0.10026509314775467,
"max": -0.07534562051296234,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -45.1035270690918,
"min": -48.12724304199219,
"max": -36.01520538330078,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006924273446202278,
"min": 0.006652571260929108,
"max": 0.33888018131256104,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.3167269229888916,
"min": 3.1998867988586426,
"max": 160.62921142578125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06743227906757966,
"min": 0.06530650933077896,
"max": 0.07187657296773979,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.011484186013695,
"min": 0.6368278165203741,
"max": 1.078148594516097,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00026463768029013105,
"min": 8.607955980854361e-05,
"max": 0.009659164084872126,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0039695652043519655,
"min": 0.0012185832862744268,
"max": 0.08693247676384913,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.4928484146000004e-07,
"min": 2.4928484146000004e-07,
"max": 9.848108185600001e-06,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 3.7392726219000005e-06,
"min": 3.7392726219000005e-06,
"max": 0.000125339346608,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249187333333333,
"min": 0.10249187333333333,
"max": 0.19848106666666665,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373781,
"min": 1.4774085000000003,
"max": 2.653392,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 3.4669545999999996e-05,
"min": 3.4669545999999996e-05,
"max": 0.00098496256,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00052004319,
"min": 0.00052004319,
"max": 0.0125485808,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006172921974211931,
"min": 0.006172921974211931,
"max": 0.35047006607055664,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09259382635354996,
"min": 0.09259382635354996,
"max": 3.1542305946350098,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 997.7142857142857,
"min": 947.8235294117648,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27936.0,
"min": 15984.0,
"max": 32457.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9269714739971927,
"min": -1.0000000447034836,
"max": -0.7257032734492133,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -25.955201271921396,
"min": -31.995601437985897,
"max": -16.000000715255737,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9269714739971927,
"min": -1.0000000447034836,
"max": -0.7257032734492133,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -25.955201271921396,
"min": -31.995601437985897,
"max": -16.000000715255737,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06642726676156079,
"min": 0.06642726676156079,
"max": 7.77874572458677,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8599634693237022,
"min": 1.8599634693237022,
"max": 124.45993159338832,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679268065",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679270257"
},
"total": 2191.6629659499995,
"count": 1,
"self": 0.5846875659990474,
"children": {
"run_training.setup": {
"total": 0.10720040500018513,
"count": 1,
"self": 0.10720040500018513
},
"TrainerController.start_learning": {
"total": 2190.9710779790003,
"count": 1,
"self": 1.3612827802971879,
"children": {
"TrainerController._reset_env": {
"total": 5.8314437640001415,
"count": 1,
"self": 5.8314437640001415
},
"TrainerController.advance": {
"total": 2183.676896880701,
"count": 62979,
"self": 1.3647283913815045,
"children": {
"env_step": {
"total": 1210.1977025874476,
"count": 62979,
"self": 1103.0912961345093,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.31721929490595,
"count": 62979,
"self": 4.633913444762584,
"children": {
"TorchPolicy.evaluate": {
"total": 101.68330585014337,
"count": 62533,
"self": 101.68330585014337
}
}
},
"workers": {
"total": 0.7891871580322913,
"count": 62979,
"self": 0.0,
"children": {
"worker_root": {
"total": 2186.867053809863,
"count": 62979,
"is_parallel": true,
"self": 1190.9609772805125,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001743867000186583,
"count": 1,
"is_parallel": true,
"self": 0.0005617989954771474,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011820680047094356,
"count": 8,
"is_parallel": true,
"self": 0.0011820680047094356
}
}
},
"UnityEnvironment.step": {
"total": 0.04660846600017976,
"count": 1,
"is_parallel": true,
"self": 0.000542361998668639,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045826900168322027,
"count": 1,
"is_parallel": true,
"self": 0.00045826900168322027
},
"communicator.exchange": {
"total": 0.04401887800122495,
"count": 1,
"is_parallel": true,
"self": 0.04401887800122495
},
"steps_from_proto": {
"total": 0.0015889569986029528,
"count": 1,
"is_parallel": true,
"self": 0.000355634001607541,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012333229969954118,
"count": 8,
"is_parallel": true,
"self": 0.0012333229969954118
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 995.9060765293507,
"count": 62978,
"is_parallel": true,
"self": 30.054837589927047,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.997584377746534,
"count": 62978,
"is_parallel": true,
"self": 21.997584377746534
},
"communicator.exchange": {
"total": 855.7250264272552,
"count": 62978,
"is_parallel": true,
"self": 855.7250264272552
},
"steps_from_proto": {
"total": 88.12862813442189,
"count": 62978,
"is_parallel": true,
"self": 18.660616718716483,
"children": {
"_process_rank_one_or_two_observation": {
"total": 69.4680114157054,
"count": 503824,
"is_parallel": true,
"self": 69.4680114157054
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 972.1144659018719,
"count": 62979,
"self": 2.1299416257825214,
"children": {
"process_trajectory": {
"total": 139.69850369111737,
"count": 62979,
"self": 139.49490435512052,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20359933599684155,
"count": 2,
"self": 0.20359933599684155
}
}
},
"_update_policy": {
"total": 830.286020584972,
"count": 459,
"self": 525.5271057420978,
"children": {
"TorchPPOOptimizer.update": {
"total": 304.7589148428742,
"count": 38100,
"self": 304.7589148428742
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.650029824115336e-07,
"count": 1,
"self": 9.650029824115336e-07
},
"TrainerController._save_models": {
"total": 0.10145358899899293,
"count": 1,
"self": 0.0016745319990150165,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09977905699997791,
"count": 1,
"self": 0.09977905699997791
}
}
}
}
}
}
}
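
The gauges above summarise training metrics (latest value plus min/max over 33 summary periods), while the nested total/count/self/children blocks form the wall-clock timer tree for the run. Below is a minimal sketch of how this run_logs/timers.json dump could be inspected offline with Python; it assumes the file is saved locally as timers.json, and the field meanings are inferred from the structure above rather than from the ML-Agents source.

import json

# Load the timer dump produced by the training run.
with open("timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus min/max over `count` summary periods.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The timer tree nests `children` blocks; `self` appears to be the time spent
# in a node excluding its children, while `total` includes them.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.2f}s "
          f"self={node.get('self', 0.0):.2f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)

Walking the tree makes it easy to see where the run spent its time: in this dump, for example, communicator.exchange under the parallel worker tree accounts for roughly 856 s of the ~2192 s total, i.e. most of the wall clock goes to stepping the Unity environment rather than to policy updates.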