testpyramidsrnd / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6965994238853455,
"min": 0.682566225528717,
"max": 1.4839755296707153,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 20842.25390625,
"min": 20509.75,
"max": 45017.8828125,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479982.0,
"min": 29952.0,
"max": 479982.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479982.0,
"min": 29952.0,
"max": 479982.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.13758805394172668,
"min": -0.09895379096269608,
"max": 0.13758805394172668,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 34.67218780517578,
"min": -23.452049255371094,
"max": 34.67218780517578,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.033422693610191345,
"min": 0.030728956684470177,
"max": 0.3146437108516693,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.422518730163574,
"min": 7.405678749084473,
"max": 74.570556640625,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06697702044210233,
"min": 0.06697702044210233,
"max": 0.07586123375666719,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9376782861894325,
"min": 0.4816076282166606,
"max": 1.0298847749648963,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01006578049512501,
"min": 0.0009342318036813654,
"max": 0.01054055872292439,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14092092693175015,
"min": 0.009101792448277711,
"max": 0.15810838084386586,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.0660364541814282e-05,
"min": 2.0660364541814282e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00028924510358539996,
"min": 0.00028924510358539996,
"max": 0.0028535985488005997,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10688675714285716,
"min": 0.10688675714285716,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4964146000000003,
"min": 1.3382272,
"max": 2.3385624,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0006979870385714288,
"min": 0.0006979870385714288,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.009771818540000003,
"min": 0.009771818540000003,
"max": 0.09515482006,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.02447795495390892,
"min": 0.02447795495390892,
"max": 0.36970290541648865,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.3426913619041443,
"min": 0.3426913619041443,
"max": 2.5879204273223877,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 688.7380952380952,
"min": 688.7380952380952,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28927.0,
"min": 15984.0,
"max": 32599.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.7656633793944265,
"min": -1.0000000521540642,
"max": 0.7656633793944265,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 31.39219855517149,
"min": -32.000001668930054,
"max": 31.39219855517149,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.7656633793944265,
"min": -1.0000000521540642,
"max": 0.7656633793944265,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 31.39219855517149,
"min": -32.000001668930054,
"max": 31.39219855517149,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1792522233842713,
"min": 0.1792522233842713,
"max": 7.8880319045856595,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 7.349341158755124,
"min": 7.349341158755124,
"max": 126.20851047337055,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1659191049",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1659192080"
},
"total": 1031.260329993,
"count": 1,
"self": 0.4331750519997968,
"children": {
"run_training.setup": {
"total": 0.044496667000089474,
"count": 1,
"self": 0.044496667000089474
},
"TrainerController.start_learning": {
"total": 1030.782658274,
"count": 1,
"self": 0.7526382590006051,
"children": {
"TrainerController._reset_env": {
"total": 9.853446760999987,
"count": 1,
"self": 9.853446760999987
},
"TrainerController.advance": {
"total": 1020.0808047899995,
"count": 31609,
"self": 0.7784692919898362,
"children": {
"env_step": {
"total": 652.751188417006,
"count": 31609,
"self": 596.0545316539603,
"children": {
"SubprocessEnvManager._take_step": {
"total": 56.30237627701581,
"count": 31609,
"self": 2.4306920730223283,
"children": {
"TorchPolicy.evaluate": {
"total": 53.87168420399348,
"count": 31322,
"self": 18.28812794298335,
"children": {
"TorchPolicy.sample_actions": {
"total": 35.58355626101013,
"count": 31322,
"self": 35.58355626101013
}
}
}
}
},
"workers": {
"total": 0.39428048602985655,
"count": 31609,
"self": 0.0,
"children": {
"worker_root": {
"total": 1028.4895228720047,
"count": 31609,
"is_parallel": true,
"self": 486.8167823900002,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007886912000003576,
"count": 1,
"is_parallel": true,
"self": 0.004399226999908024,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003487685000095553,
"count": 8,
"is_parallel": true,
"self": 0.003487685000095553
}
}
},
"UnityEnvironment.step": {
"total": 0.05141290099993512,
"count": 1,
"is_parallel": true,
"self": 0.0005321629998888966,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005684420000307,
"count": 1,
"is_parallel": true,
"self": 0.0005684420000307
},
"communicator.exchange": {
"total": 0.04831634300001042,
"count": 1,
"is_parallel": true,
"self": 0.04831634300001042
},
"steps_from_proto": {
"total": 0.001995953000005102,
"count": 1,
"is_parallel": true,
"self": 0.0005100589997937277,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001485894000211374,
"count": 8,
"is_parallel": true,
"self": 0.001485894000211374
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 541.6727404820045,
"count": 31608,
"is_parallel": true,
"self": 14.241090669000982,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.23796813500735,
"count": 31608,
"is_parallel": true,
"self": 13.23796813500735
},
"communicator.exchange": {
"total": 464.17815307300293,
"count": 31608,
"is_parallel": true,
"self": 464.17815307300293
},
"steps_from_proto": {
"total": 50.01552860499328,
"count": 31608,
"is_parallel": true,
"self": 12.449649666965456,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.56587893802782,
"count": 252864,
"is_parallel": true,
"self": 37.56587893802782
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 366.5511470810037,
"count": 31609,
"self": 1.2610165759749634,
"children": {
"process_trajectory": {
"total": 86.20302966602787,
"count": 31609,
"self": 85.97524145502791,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22778821099996094,
"count": 1,
"self": 0.22778821099996094
}
}
},
"_update_policy": {
"total": 279.08710083900087,
"count": 211,
"self": 112.31944955900485,
"children": {
"TorchPPOOptimizer.update": {
"total": 166.76765127999602,
"count": 11418,
"self": 166.76765127999602
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1739998626580928e-06,
"count": 1,
"self": 1.1739998626580928e-06
},
"TrainerController._save_models": {
"total": 0.09576728999991246,
"count": 1,
"self": 0.0015520419999575097,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09421524799995495,
"count": 1,
"self": 0.09421524799995495
}
}
}
}
}
}
}
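
For reference, a minimal sketch (not part of the logged file) of how the timers.json above could be loaded and summarized in Python. The key names ("gauges", "metadata", "total", and the per-gauge "value"/"min"/"max"/"count" fields) follow the structure shown above; the relative file path is an assumption based on the page title.

import json

# Load the timers file written by ML-Agents (path assumed from the page title above).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus min/max/count of a metric over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

# Run-level metadata: wall-clock duration versus the root timer total.
meta = timers["metadata"]
elapsed = int(meta["end_time_seconds"]) - int(meta["start_time_seconds"])
print(f"mlagents {meta['mlagents_version']} run took ~{elapsed} s "
      f"(root timer total: {timers['total']:.1f} s)")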