PPO-pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.34154579043388367,
"min": 0.32417991757392883,
"max": 1.4215493202209473,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10202.65625,
"min": 9849.8828125,
"max": 43124.12109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989949.0,
"min": 29952.0,
"max": 989949.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989949.0,
"min": 29952.0,
"max": 989949.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6200006604194641,
"min": -0.10368364304304123,
"max": 0.6200006604194641,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 176.7001953125,
"min": -24.98775863647461,
"max": 176.7001953125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0031483278144150972,
"min": -0.018381230533123016,
"max": 0.3371911942958832,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.8972734212875366,
"min": -5.201888084411621,
"max": 79.91431427001953,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06980908580382156,
"min": 0.06637119996081596,
"max": 0.07280109544209315,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9773272012535018,
"min": 0.4995243839443535,
"max": 1.0920164316313972,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014992882159680853,
"min": 0.00024052776629044913,
"max": 0.016848503886516226,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20990035023553194,
"min": 0.0028863331954853896,
"max": 0.23587905441122717,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.62136174529286e-06,
"min": 7.62136174529286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010669906443410004,
"min": 0.00010669906443410004,
"max": 0.003507299030900399,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254042142857142,
"min": 0.10254042142857142,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355658999999998,
"min": 1.3886848,
"max": 2.5690996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026378810071428586,
"min": 0.00026378810071428586,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003693033410000002,
"min": 0.003693033410000002,
"max": 0.11693305004,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011995785869657993,
"min": 0.011863996274769306,
"max": 0.4961375296115875,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16794100403785706,
"min": 0.16609594225883484,
"max": 3.4729626178741455,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 292.69607843137254,
"min": 292.69607843137254,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29855.0,
"min": 15984.0,
"max": 32283.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6484627271545869,
"min": -1.0000000521540642,
"max": 1.6688367121529823,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 168.14319816976786,
"min": -31.995601654052734,
"max": 168.14319816976786,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6484627271545869,
"min": -1.0000000521540642,
"max": 1.6688367121529823,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 168.14319816976786,
"min": -31.995601654052734,
"max": 168.14319816976786,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03665336981251968,
"min": 0.03665336981251968,
"max": 10.872184567153454,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.7386437208770076,
"min": 3.628451132331975,
"max": 173.95495307445526,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699606759",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/pyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699609015"
},
"total": 2255.537892632,
"count": 1,
"self": 0.492602352999711,
"children": {
"run_training.setup": {
"total": 0.07962301399999205,
"count": 1,
"self": 0.07962301399999205
},
"TrainerController.start_learning": {
"total": 2254.9656672650003,
"count": 1,
"self": 1.2512374000102682,
"children": {
"TrainerController._reset_env": {
"total": 2.039493589999992,
"count": 1,
"self": 2.039493589999992
},
"TrainerController.advance": {
"total": 2251.6114051559907,
"count": 63952,
"self": 1.3239888730331586,
"children": {
"env_step": {
"total": 1470.0474848769745,
"count": 63952,
"self": 1376.1389312879635,
"children": {
"SubprocessEnvManager._take_step": {
"total": 93.15112457699675,
"count": 63952,
"self": 4.284842827996286,
"children": {
"TorchPolicy.evaluate": {
"total": 88.86628174900046,
"count": 62557,
"self": 88.86628174900046
}
}
},
"workers": {
"total": 0.7574290120143559,
"count": 63952,
"self": 0.0,
"children": {
"worker_root": {
"total": 2251.6639401619686,
"count": 63952,
"is_parallel": true,
"self": 981.0436058319599,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00952003500000842,
"count": 1,
"is_parallel": true,
"self": 0.007386776000032569,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002133258999975851,
"count": 8,
"is_parallel": true,
"self": 0.002133258999975851
}
}
},
"UnityEnvironment.step": {
"total": 0.07401524899999856,
"count": 1,
"is_parallel": true,
"self": 0.0005591289999955507,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047041499999522784,
"count": 1,
"is_parallel": true,
"self": 0.00047041499999522784
},
"communicator.exchange": {
"total": 0.07139871300000777,
"count": 1,
"is_parallel": true,
"self": 0.07139871300000777
},
"steps_from_proto": {
"total": 0.0015869920000000093,
"count": 1,
"is_parallel": true,
"self": 0.0003462689999906843,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001240723000009325,
"count": 8,
"is_parallel": true,
"self": 0.001240723000009325
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1270.6203343300087,
"count": 63951,
"is_parallel": true,
"self": 32.938889550006934,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 18.405153923988948,
"count": 63951,
"is_parallel": true,
"self": 18.405153923988948
},
"communicator.exchange": {
"total": 1125.6860245640369,
"count": 63951,
"is_parallel": true,
"self": 1125.6860245640369
},
"steps_from_proto": {
"total": 93.59026629197592,
"count": 63951,
"is_parallel": true,
"self": 18.35641417802681,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.23385211394911,
"count": 511608,
"is_parallel": true,
"self": 75.23385211394911
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 780.2399314059832,
"count": 63952,
"self": 2.4178023940082767,
"children": {
"process_trajectory": {
"total": 116.52973652497539,
"count": 63952,
"self": 116.33130163097562,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19843489399977443,
"count": 2,
"self": 0.19843489399977443
}
}
},
"_update_policy": {
"total": 661.2923924869995,
"count": 449,
"self": 279.5685566459839,
"children": {
"TorchPPOOptimizer.update": {
"total": 381.72383584101556,
"count": 22737,
"self": 381.72383584101556
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.689997568784747e-07,
"count": 1,
"self": 9.689997568784747e-07
},
"TrainerController._save_models": {
"total": 0.06353014999967854,
"count": 1,
"self": 0.0011650989999907324,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06236505099968781,
"count": 1,
"self": 0.06236505099968781
}
}
}
}
}
}
}
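
# A minimal sketch (not part of the run logs) of how one might inspect this
# timers.json: each node carries "total"/"self" wall-clock seconds, a call
# "count", and nested "children". The relative file path and the 1-second
# threshold below are assumptions for illustration, not part of ML-Agents.
#
# import json
#
# def walk(node, name="root", depth=0, min_seconds=1.0):
#     """Recursively print timer nodes whose own ('self') time exceeds min_seconds."""
#     self_time = node.get("self", 0.0)
#     if self_time >= min_seconds:
#         print(f"{'  ' * depth}{name}: self={self_time:.2f}s, count={node.get('count', 0)}")
#     for child_name, child in node.get("children", {}).items():
#         walk(child, child_name, depth + 1, min_seconds)
#
# with open("run_logs/timers.json") as f:  # assumed path to the file above
#     root = json.load(f)
#
# walk(root)
#
# On this run, such a traversal would surface communicator.exchange
# (~1125.7 s across 63951 calls) as the dominant cost inside env_step,
# with TorchPPOOptimizer.update (~381.7 s) leading trainer_advance.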