{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2999937832355499,
"min": 0.2999937832355499,
"max": 1.4785271883010864,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8995.013671875,
"min": 8995.013671875,
"max": 44852.6015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989998.0,
"min": 29952.0,
"max": 989998.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989998.0,
"min": 29952.0,
"max": 989998.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6456058621406555,
"min": -0.09177502989768982,
"max": 0.6456058621406555,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 184.64328002929688,
"min": -22.209556579589844,
"max": 184.64328002929688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.02899160608649254,
"min": -0.02899160608649254,
"max": 0.5885925889015198,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -8.29159927368164,
"min": -8.29159927368164,
"max": 139.49644470214844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06946636789611407,
"min": 0.06643765286353502,
"max": 0.07451885870334041,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9725291505455971,
"min": 0.5216320109233828,
"max": 1.071729173817028,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014392362226360653,
"min": 0.0005907844978244992,
"max": 0.017164854935404792,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20149307116904913,
"min": 0.008270982969542989,
"max": 0.24030796909566707,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.616626032585717e-06,
"min": 7.616626032585717e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010663276445620003,
"min": 0.00010663276445620003,
"max": 0.003508660430446599,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253884285714285,
"min": 0.10253884285714285,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355437999999998,
"min": 1.3886848,
"max": 2.5695534,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002636304014285716,
"min": 0.0002636304014285716,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003690825620000002,
"min": 0.003690825620000002,
"max": 0.11697838466000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010772056877613068,
"min": 0.010772056877613068,
"max": 0.5962501764297485,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15080879628658295,
"min": 0.15080879628658295,
"max": 4.173751354217529,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 296.4019607843137,
"min": 296.4019607843137,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30233.0,
"min": 15984.0,
"max": 34006.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7040593871975889,
"min": -1.0000000521540642,
"max": 1.7040593871975889,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 172.10999810695648,
"min": -29.783001638948917,
"max": 172.10999810695648,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7040593871975889,
"min": -1.0000000521540642,
"max": 1.7040593871975889,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 172.10999810695648,
"min": -29.783001638948917,
"max": 172.10999810695648,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03284236752845365,
"min": 0.03284236752845365,
"max": 12.895050339400768,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.317079120373819,
"min": 3.317079120373819,
"max": 206.3208054304123,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691566337",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691568613"
},
"total": 2276.175187609,
"count": 1,
"self": 0.4759510189996945,
"children": {
"run_training.setup": {
"total": 0.03986612500011688,
"count": 1,
"self": 0.03986612500011688
},
"TrainerController.start_learning": {
"total": 2275.6593704650004,
"count": 1,
"self": 1.3884832019502937,
"children": {
"TrainerController._reset_env": {
"total": 4.322240403000251,
"count": 1,
"self": 4.322240403000251
},
"TrainerController.advance": {
"total": 2269.85323200905,
"count": 63836,
"self": 1.4225020370422499,
"children": {
"env_step": {
"total": 1602.2743807209908,
"count": 63836,
"self": 1492.1214828867505,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.32951496308942,
"count": 63836,
"self": 4.859396207132704,
"children": {
"TorchPolicy.evaluate": {
"total": 104.47011875595672,
"count": 62539,
"self": 104.47011875595672
}
}
},
"workers": {
"total": 0.8233828711508977,
"count": 63836,
"self": 0.0,
"children": {
"worker_root": {
"total": 2270.6069093929345,
"count": 63836,
"is_parallel": true,
"self": 894.7847358946938,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020542110005408176,
"count": 1,
"is_parallel": true,
"self": 0.0006617500021093292,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013924609984314884,
"count": 8,
"is_parallel": true,
"self": 0.0013924609984314884
}
}
},
"UnityEnvironment.step": {
"total": 0.08804747100020904,
"count": 1,
"is_parallel": true,
"self": 0.0005595889997493941,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004596150001816568,
"count": 1,
"is_parallel": true,
"self": 0.0004596150001816568
},
"communicator.exchange": {
"total": 0.0848628550002104,
"count": 1,
"is_parallel": true,
"self": 0.0848628550002104
},
"steps_from_proto": {
"total": 0.0021654120000675903,
"count": 1,
"is_parallel": true,
"self": 0.0004176839984211256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017477280016464647,
"count": 8,
"is_parallel": true,
"self": 0.0017477280016464647
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1375.8221734982408,
"count": 63835,
"is_parallel": true,
"self": 33.90833273736462,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.27073943395044,
"count": 63835,
"is_parallel": true,
"self": 23.27073943395044
},
"communicator.exchange": {
"total": 1212.9101757889703,
"count": 63835,
"is_parallel": true,
"self": 1212.9101757889703
},
"steps_from_proto": {
"total": 105.73292553795545,
"count": 63835,
"is_parallel": true,
"self": 20.855085531205077,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.87784000675038,
"count": 510680,
"is_parallel": true,
"self": 84.87784000675038
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 666.1563492510168,
"count": 63836,
"self": 2.5901429859368363,
"children": {
"process_trajectory": {
"total": 111.68073539607485,
"count": 63836,
"self": 111.4162730280741,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26446236800074985,
"count": 2,
"self": 0.26446236800074985
}
}
},
"_update_policy": {
"total": 551.8854708690051,
"count": 453,
"self": 359.90227354510716,
"children": {
"TorchPPOOptimizer.update": {
"total": 191.9831973238979,
"count": 22785,
"self": 191.9831973238979
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0349995136493817e-06,
"count": 1,
"self": 1.0349995136493817e-06
},
"TrainerController._save_models": {
"total": 0.09541381600047316,
"count": 1,
"self": 0.0014313100000435952,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09398250600042957,
"count": 1,
"self": 0.09398250600042957
}
}
}
}
}
}
}