{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5278274416923523,
"min": 0.5238612294197083,
"max": 1.4264739751815796,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15902.384765625,
"min": 15615.255859375,
"max": 43273.515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989945.0,
"min": 29971.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989945.0,
"min": 29971.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.17432379722595215,
"min": -0.10377410799264908,
"max": 0.24251849949359894,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 43.929595947265625,
"min": -25.11333465576172,
"max": 61.84221649169922,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008939574472606182,
"min": 0.0035323123447597027,
"max": 0.3342987895011902,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.252772808074951,
"min": 0.8901427388191223,
"max": 79.5631103515625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06650228196506731,
"min": 0.0639622387052489,
"max": 0.07317192745006265,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9310319475109423,
"min": 0.4997715534616915,
"max": 1.060928463198555,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.008351795632278116,
"min": 0.0001401915527483047,
"max": 0.009569305434120502,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.11692513885189362,
"min": 0.0016822986329796561,
"max": 0.14353958151180754,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.456490371678568e-06,
"min": 7.456490371678568e-06,
"max": 0.0002952375873017571,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010439086520349994,
"min": 0.00010439086520349994,
"max": 0.0033319509893496996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248546428571428,
"min": 0.10248546428571428,
"max": 0.19841252857142858,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347965,
"min": 1.3888877,
"max": 2.3843311000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002582978821428571,
"min": 0.0002582978821428571,
"max": 0.009841411604285713,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036161703499999987,
"min": 0.0036161703499999987,
"max": 0.11107396496999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009348786436021328,
"min": 0.009348786436021328,
"max": 0.32108768820762634,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13088300824165344,
"min": 0.13088300824165344,
"max": 2.2476139068603516,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 658.5681818181819,
"min": 592.7872340425532,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28977.0,
"min": 16514.0,
"max": 32760.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9037821799516678,
"min": -0.9999500522390008,
"max": 0.9037821799516678,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 40.67019809782505,
"min": -31.998401671648026,
"max": 43.130998477339745,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9037821799516678,
"min": -0.9999500522390008,
"max": 0.9037821799516678,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 40.67019809782505,
"min": -31.998401671648026,
"max": 43.130998477339745,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06456249599448509,
"min": 0.06272698748459031,
"max": 5.578721653450938,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.905312319751829,
"min": 2.905312319751829,
"max": 94.83826810866594,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679734960",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679737110"
},
"total": 2149.198328728,
"count": 1,
"self": 0.4883026340003198,
"children": {
"run_training.setup": {
"total": 0.12473185199996806,
"count": 1,
"self": 0.12473185199996806
},
"TrainerController.start_learning": {
"total": 2148.5852942419997,
"count": 1,
"self": 1.3935922030445909,
"children": {
"TrainerController._reset_env": {
"total": 6.2242930110001,
"count": 1,
"self": 6.2242930110001
},
"TrainerController.advance": {
"total": 2140.8617318949555,
"count": 63251,
"self": 1.5911163169866995,
"children": {
"env_step": {
"total": 1487.0000204480389,
"count": 63251,
"self": 1370.4269662111162,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.7377016689502,
"count": 63251,
"self": 5.050899038018997,
"children": {
"TorchPolicy.evaluate": {
"total": 110.6868026309312,
"count": 62576,
"self": 110.6868026309312
}
}
},
"workers": {
"total": 0.8353525679724498,
"count": 63251,
"self": 0.0,
"children": {
"worker_root": {
"total": 2143.7346812729543,
"count": 63251,
"is_parallel": true,
"self": 896.0741023779253,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017889869998271024,
"count": 1,
"is_parallel": true,
"self": 0.000564875999771175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012241110000559274,
"count": 8,
"is_parallel": true,
"self": 0.0012241110000559274
}
}
},
"UnityEnvironment.step": {
"total": 0.08811649800009036,
"count": 1,
"is_parallel": true,
"self": 0.0005902279999645543,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005694449998827622,
"count": 1,
"is_parallel": true,
"self": 0.0005694449998827622
},
"communicator.exchange": {
"total": 0.08492650200014396,
"count": 1,
"is_parallel": true,
"self": 0.08492650200014396
},
"steps_from_proto": {
"total": 0.002030323000099088,
"count": 1,
"is_parallel": true,
"self": 0.0006321659998320683,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013981570002670196,
"count": 8,
"is_parallel": true,
"self": 0.0013981570002670196
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1247.660578895029,
"count": 63250,
"is_parallel": true,
"self": 32.29595724399792,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.70859236403021,
"count": 63250,
"is_parallel": true,
"self": 24.70859236403021
},
"communicator.exchange": {
"total": 1091.7564600410276,
"count": 63250,
"is_parallel": true,
"self": 1091.7564600410276
},
"steps_from_proto": {
"total": 98.89956924597323,
"count": 63250,
"is_parallel": true,
"self": 21.2277243213764,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.67184492459683,
"count": 506000,
"is_parallel": true,
"self": 77.67184492459683
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 652.2705951299301,
"count": 63251,
"self": 2.486525832970983,
"children": {
"process_trajectory": {
"total": 123.0351350519561,
"count": 63251,
"self": 122.81202976495638,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22310528699972565,
"count": 2,
"self": 0.22310528699972565
}
}
},
"_update_policy": {
"total": 526.748934245003,
"count": 442,
"self": 338.47214949102977,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.2767847539733,
"count": 22824,
"self": 188.2767847539733
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.059996616793796e-07,
"count": 1,
"self": 8.059996616793796e-07
},
"TrainerController._save_models": {
"total": 0.10567632699985552,
"count": 1,
"self": 0.0014394149998224748,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10423691200003304,
"count": 1,
"self": 0.10423691200003304
}
}
}
}
}
}
}