ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6108947396278381,
"min": 0.5913000702857971,
"max": 1.5277938842773438,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18248.6484375,
"min": 17644.39453125,
"max": 46347.15625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4226970374584198,
"min": -0.15486682951450348,
"max": 0.46327686309814453,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 116.24168395996094,
"min": -37.322906494140625,
"max": 126.4745864868164,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.06831049919128418,
"min": -0.12132998555898666,
"max": 0.6831167340278625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 18.78538703918457,
"min": -32.15244674682617,
"max": 164.63113403320312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.032634509236751395,
"min": 0.029665305603495135,
"max": 0.04076152978314444,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.22844156465725976,
"min": 0.16304611913257777,
"max": 0.3054855889931787,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016176609643956737,
"min": 0.0016630183671762497,
"max": 0.01758412607125051,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.11323626750769715,
"min": 0.011641128570233748,
"max": 0.1330827446266388,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.191054745871432e-06,
"min": 7.191054745871432e-06,
"max": 0.00029544960151680006,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 5.0337383221100025e-05,
"min": 5.0337383221100025e-05,
"max": 0.0019430709523097,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10239698571428572,
"min": 0.10239698571428572,
"max": 0.19848320000000003,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.7167789000000001,
"min": 0.7167789000000001,
"max": 1.3476903,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000249458872857143,
"min": 0.000249458872857143,
"max": 0.009848471680000002,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0017462121100000009,
"min": 0.0017462121100000009,
"max": 0.06477426097,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.029747527092695236,
"min": 0.029747527092695236,
"max": 0.8650932312011719,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20823268592357635,
"min": 0.20823268592357635,
"max": 3.5050172805786133,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 431.7042253521127,
"min": 377.82894736842104,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30651.0,
"min": 15984.0,
"max": 32187.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4837661767509622,
"min": -1.0000000521540642,
"max": 1.5290527584859066,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 105.34739854931831,
"min": -29.806601651012897,
"max": 115.28279858827591,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4837661767509622,
"min": -1.0000000521540642,
"max": 1.5290527584859066,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 105.34739854931831,
"min": -29.806601651012897,
"max": 115.28279858827591,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.13184046082552786,
"min": 0.12332039502679866,
"max": 11.199776386842132,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 9.360672718612477,
"min": 9.360672718612477,
"max": 267.5677567124367,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696036895",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1696038911"
},
"total": 2015.474267846,
"count": 1,
"self": 0.4879589159997977,
"children": {
"run_training.setup": {
"total": 0.04072322100000747,
"count": 1,
"self": 0.04072322100000747
},
"TrainerController.start_learning": {
"total": 2014.9455857090002,
"count": 1,
"self": 1.388280593952686,
"children": {
"TrainerController._reset_env": {
"total": 4.111138328000152,
"count": 1,
"self": 4.111138328000152
},
"TrainerController.advance": {
"total": 2009.3544861220475,
"count": 63702,
"self": 1.3551691760085305,
"children": {
"env_step": {
"total": 1535.2195172810102,
"count": 63702,
"self": 1426.2943200390343,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.12746983500097,
"count": 63702,
"self": 4.809192913028255,
"children": {
"TorchPolicy.evaluate": {
"total": 103.31827692197271,
"count": 62559,
"self": 103.31827692197271
}
}
},
"workers": {
"total": 0.7977274069749001,
"count": 63702,
"self": 0.0,
"children": {
"worker_root": {
"total": 2010.0421969169759,
"count": 63702,
"is_parallel": true,
"self": 698.2265860330217,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018294110000169894,
"count": 1,
"is_parallel": true,
"self": 0.0005624410000564239,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012669699999605655,
"count": 8,
"is_parallel": true,
"self": 0.0012669699999605655
}
}
},
"UnityEnvironment.step": {
"total": 0.05321653200007859,
"count": 1,
"is_parallel": true,
"self": 0.0006318380001175683,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000662981999994372,
"count": 1,
"is_parallel": true,
"self": 0.000662981999994372
},
"communicator.exchange": {
"total": 0.049185785999952714,
"count": 1,
"is_parallel": true,
"self": 0.049185785999952714
},
"steps_from_proto": {
"total": 0.002735926000013933,
"count": 1,
"is_parallel": true,
"self": 0.00035638199983623053,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023795440001777024,
"count": 8,
"is_parallel": true,
"self": 0.0023795440001777024
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1311.8156108839542,
"count": 63701,
"is_parallel": true,
"self": 34.22474295986899,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.772985178032513,
"count": 63701,
"is_parallel": true,
"self": 22.772985178032513
},
"communicator.exchange": {
"total": 1148.8495001020651,
"count": 63701,
"is_parallel": true,
"self": 1148.8495001020651
},
"steps_from_proto": {
"total": 105.96838264398752,
"count": 63701,
"is_parallel": true,
"self": 20.5462574791527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.42212516483482,
"count": 509608,
"is_parallel": true,
"self": 85.42212516483482
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 472.7797996650288,
"count": 63702,
"self": 2.5116337910337734,
"children": {
"process_trajectory": {
"total": 113.11816792499417,
"count": 63702,
"self": 112.91474547499433,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2034224499998345,
"count": 2,
"self": 0.2034224499998345
}
}
},
"_update_policy": {
"total": 357.14999794900086,
"count": 234,
"self": 295.83719117899045,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.31280677001041,
"count": 5736,
"self": 61.31280677001041
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4150000424706377e-06,
"count": 1,
"self": 1.4150000424706377e-06
},
"TrainerController._save_models": {
"total": 0.09167924999974275,
"count": 1,
"self": 0.0013463619998219656,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09033288799992079,
"count": 1,
"self": 0.09033288799992079
}
}
}
}
}
}
}
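Below is a minimal Python sketch (not part of this repo) of how a timers.json like the one above can be inspected. The structure it relies on is what the file itself shows: a "gauges" map of value/min/max/count entries, and a timer tree whose nodes carry "total", "count", "self", and nested "children". The local path and the `walk` helper name are illustrative assumptions, not anything shipped with ML-Agents.

```python
import json

# Illustrative local path; adjust to wherever the file is downloaded.
PATH = "run_logs/timers.json"

with open(PATH) as f:
    root = json.load(f)

# Each entry under "gauges" records the most recent value plus the
# min/max observed and how many times the gauge was updated.
for name, gauge in sorted(root["gauges"].items()):
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

# The timer tree nests "children" blocks. "total" is cumulative seconds,
# "self" excludes time spent in children, and "is_parallel" marks nodes
# timed across worker processes (their totals can exceed wall-clock time).
def walk(node, name="root", depth=0):
    indent = "  " * depth
    print(f"{indent}{name}: total={node.get('total', 0.0):.3f}s "
          f"self={node.get('self', 0.0):.3f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(root)
```

Run against this file, the tree walk makes the cost profile plain: communicator.exchange accounts for roughly 1149 s of the ~2015 s total (measured on parallel workers, so only approximately comparable to wall time), i.e. most of the run is spent waiting on the Unity environment rather than in the PPO update itself (~357 s in _update_policy).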