{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.936905026435852,
"min": 0.936905026435852,
"max": 1.501382827758789,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 28062.1796875,
"min": 28062.1796875,
"max": 45545.94921875,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89968.0,
"min": 29952.0,
"max": 89968.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89968.0,
"min": 29952.0,
"max": 89968.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07839976996183395,
"min": -0.1357463151216507,
"max": -0.07839976996183395,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -18.894344329833984,
"min": -32.171875,
"max": -18.894344329833984,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.2135143131017685,
"min": 0.2135143131017685,
"max": 0.4094493091106415,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 51.45695114135742,
"min": 51.45695114135742,
"max": 97.03948974609375,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06848575404127802,
"min": 0.06745194194526148,
"max": 0.07103279675208897,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.6163717863715021,
"min": 0.47216359361683036,
"max": 0.6163717863715021,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00110619817132268,
"min": 0.0007614107352122213,
"max": 0.00617994070284163,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.00995578354190412,
"min": 0.005329875146485549,
"max": 0.04325958491989141,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.621607459466666e-05,
"min": 7.621607459466666e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0006859446713519999,
"min": 0.0006859446713519999,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1254053333333333,
"min": 0.1254053333333333,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.1286479999999999,
"min": 1.0911359999999999,
"max": 1.2868480000000002,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0025479928,
"min": 0.0025479928,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0229319352,
"min": 0.0229319352,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.12636737525463104,
"min": 0.12636737525463104,
"max": 0.4556843936443329,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.1373063325881958,
"min": 1.1373063325881958,
"max": 3.189790725708008,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 987.6060606060606,
"min": 987.6060606060606,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32591.0,
"min": 15984.0,
"max": 32591.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9279455075209792,
"min": -1.0000000521540642,
"max": -0.9279455075209792,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -30.62220174819231,
"min": -32.000001668930054,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9279455075209792,
"min": -1.0000000521540642,
"max": -0.9279455075209792,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -30.62220174819231,
"min": -32.000001668930054,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.4591752089785808,
"min": 1.4591752089785808,
"max": 9.527521381154656,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 48.15278189629316,
"min": 48.15278189629316,
"max": 152.4403420984745,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679157949",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679158214"
},
"total": 265.71025184400014,
"count": 1,
"self": 0.4777992310002901,
"children": {
"run_training.setup": {
"total": 0.0989278029999241,
"count": 1,
"self": 0.0989278029999241
},
"TrainerController.start_learning": {
"total": 265.1335248099999,
"count": 1,
"self": 0.16002492801021617,
"children": {
"TrainerController._reset_env": {
"total": 3.2905227669998567,
"count": 1,
"self": 3.2905227669998567
},
"TrainerController.advance": {
"total": 261.58688104998987,
"count": 6266,
"self": 0.1626546129966755,
"children": {
"env_step": {
"total": 130.55104172499637,
"count": 6266,
"self": 116.49186898698622,
"children": {
"SubprocessEnvManager._take_step": {
"total": 13.950425139999652,
"count": 6266,
"self": 0.46834564900132136,
"children": {
"TorchPolicy.evaluate": {
"total": 13.48207949099833,
"count": 6264,
"self": 13.48207949099833
}
}
},
"workers": {
"total": 0.1087475980104955,
"count": 6266,
"self": 0.0,
"children": {
"worker_root": {
"total": 264.72290477200363,
"count": 6266,
"is_parallel": true,
"self": 160.95561504200305,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020154500000444386,
"count": 1,
"is_parallel": true,
"self": 0.000641289999748551,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013741600002958876,
"count": 8,
"is_parallel": true,
"self": 0.0013741600002958876
}
}
},
"UnityEnvironment.step": {
"total": 0.048708931000192024,
"count": 1,
"is_parallel": true,
"self": 0.0006322600002022227,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004872300000897667,
"count": 1,
"is_parallel": true,
"self": 0.0004872300000897667
},
"communicator.exchange": {
"total": 0.04585081100003663,
"count": 1,
"is_parallel": true,
"self": 0.04585081100003663
},
"steps_from_proto": {
"total": 0.0017386299998634058,
"count": 1,
"is_parallel": true,
"self": 0.0003493499993965088,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001389280000466897,
"count": 8,
"is_parallel": true,
"self": 0.001389280000466897
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 103.76728973000058,
"count": 6265,
"is_parallel": true,
"self": 3.5286042160028046,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.192956381986278,
"count": 6265,
"is_parallel": true,
"self": 2.192956381986278
},
"communicator.exchange": {
"total": 88.02292562000616,
"count": 6265,
"is_parallel": true,
"self": 88.02292562000616
},
"steps_from_proto": {
"total": 10.02280351200534,
"count": 6265,
"is_parallel": true,
"self": 2.116278868003292,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.906524644002047,
"count": 50120,
"is_parallel": true,
"self": 7.906524644002047
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 130.87318471199683,
"count": 6266,
"self": 0.21979200199757543,
"children": {
"process_trajectory": {
"total": 14.09618887699935,
"count": 6266,
"self": 14.09618887699935
},
"_update_policy": {
"total": 116.5572038329999,
"count": 27,
"self": 32.92581494599813,
"children": {
"TorchPPOOptimizer.update": {
"total": 83.63138888700178,
"count": 2292,
"self": 83.63138888700178
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0599999313853914e-06,
"count": 1,
"self": 1.0599999313853914e-06
},
"TrainerController._save_models": {
"total": 0.09609500500005197,
"count": 1,
"self": 0.0012554500001442648,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0948395549999077,
"count": 1,
"self": 0.0948395549999077
}
}
}
}
}
}
}