{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.18617230653762817,
"min": 0.18617230653762817,
"max": 1.4845129251480103,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 5629.8505859375,
"min": 5629.8505859375,
"max": 45034.18359375,
"count": 50
},
"Pyramids.Step.mean": {
"value": 1499916.0,
"min": 29952.0,
"max": 1499916.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 1499916.0,
"min": 29952.0,
"max": 1499916.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7307161092758179,
"min": -0.149481862783432,
"max": 0.7492076754570007,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 215.56124877929688,
"min": -35.42720031738281,
"max": 224.01309204101562,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.03664468601346016,
"min": -0.03664468601346016,
"max": 0.4279721975326538,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -10.810182571411133,
"min": -10.810182571411133,
"max": 101.42941284179688,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07124440428155705,
"min": 0.06368099707618405,
"max": 0.07446024584898299,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9974216599417987,
"min": 0.5212217209428809,
"max": 1.0970730976938892,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017000972014711322,
"min": 0.0021087555317254388,
"max": 0.017000972014711322,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2380136082059585,
"min": 0.021087555317254387,
"max": 0.24874341461947874,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.8985704624142837e-06,
"min": 2.8985704624142837e-06,
"max": 0.00029676708679192377,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.057998647379997e-05,
"min": 4.057998647379997e-05,
"max": 0.003823484925505066,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10096615714285717,
"min": 0.10096615714285717,
"max": 0.19892236190476195,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4135262000000004,
"min": 1.3924565333333336,
"max": 2.714839733333334,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0001065190985714285,
"min": 0.0001065190985714285,
"max": 0.009892343954285714,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.001491267379999999,
"min": 0.001491267379999999,
"max": 0.12746204384,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00981984380632639,
"min": 0.009456416592001915,
"max": 0.5637169480323792,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1374778151512146,
"min": 0.1323898285627365,
"max": 3.9460184574127197,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 250.69421487603304,
"min": 249.1271186440678,
"max": 999.0,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30334.0,
"min": 15984.0,
"max": 32543.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7339458889404282,
"min": -1.0000000521540642,
"max": 1.7339458889404282,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 211.54139845073223,
"min": -28.573201686143875,
"max": 211.54139845073223,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7339458889404282,
"min": -1.0000000521540642,
"max": 1.7339458889404282,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 211.54139845073223,
"min": -28.573201686143875,
"max": 211.54139845073223,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02522740954319473,
"min": 0.02522740954319473,
"max": 11.12333600781858,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0777439642697573,
"min": 2.7681884536868893,
"max": 177.97337612509727,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677883316",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677887415"
},
"total": 4099.357167575,
"count": 1,
"self": 0.4463813399997889,
"children": {
"run_training.setup": {
"total": 0.11649313399999528,
"count": 1,
"self": 0.11649313399999528
},
"TrainerController.start_learning": {
"total": 4098.794293101,
"count": 1,
"self": 2.500096043107078,
"children": {
"TrainerController._reset_env": {
"total": 9.29276846900001,
"count": 1,
"self": 9.29276846900001
},
"TrainerController.advance": {
"total": 4086.9174338128937,
"count": 97005,
"self": 2.623944000879874,
"children": {
"env_step": {
"total": 2902.6345002500207,
"count": 97005,
"self": 2705.3667509040542,
"children": {
"SubprocessEnvManager._take_step": {
"total": 195.67365381293416,
"count": 97005,
"self": 8.007475172883744,
"children": {
"TorchPolicy.evaluate": {
"total": 187.66617864005042,
"count": 93815,
"self": 64.37438756107991,
"children": {
"TorchPolicy.sample_actions": {
"total": 123.2917910789705,
"count": 93815,
"self": 123.2917910789705
}
}
}
}
},
"workers": {
"total": 1.5940955330323732,
"count": 97005,
"self": 0.0,
"children": {
"worker_root": {
"total": 4088.6202401469336,
"count": 97005,
"is_parallel": true,
"self": 1587.96803850391,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004974586000003001,
"count": 1,
"is_parallel": true,
"self": 0.0037582870000392177,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001216298999963783,
"count": 8,
"is_parallel": true,
"self": 0.001216298999963783
}
}
},
"UnityEnvironment.step": {
"total": 0.09503308199998628,
"count": 1,
"is_parallel": true,
"self": 0.0005695229999673757,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004733150000220121,
"count": 1,
"is_parallel": true,
"self": 0.0004733150000220121
},
"communicator.exchange": {
"total": 0.09235710299998345,
"count": 1,
"is_parallel": true,
"self": 0.09235710299998345
},
"steps_from_proto": {
"total": 0.0016331410000134383,
"count": 1,
"is_parallel": true,
"self": 0.00041921300004332807,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012139279999701102,
"count": 8,
"is_parallel": true,
"self": 0.0012139279999701102
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2500.6522016430235,
"count": 97004,
"is_parallel": true,
"self": 55.68702987897814,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 37.64980132499312,
"count": 97004,
"is_parallel": true,
"self": 37.64980132499312
},
"communicator.exchange": {
"total": 2254.145149077041,
"count": 97004,
"is_parallel": true,
"self": 2254.145149077041
},
"steps_from_proto": {
"total": 153.17022136201138,
"count": 97004,
"is_parallel": true,
"self": 37.78863549796904,
"children": {
"_process_rank_one_or_two_observation": {
"total": 115.38158586404234,
"count": 776032,
"is_parallel": true,
"self": 115.38158586404234
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1181.6589895619932,
"count": 97005,
"self": 4.892591148138081,
"children": {
"process_trajectory": {
"total": 262.29388926185493,
"count": 97005,
"self": 261.95378288085453,
"children": {
"RLTrainer._checkpoint": {
"total": 0.34010638100039614,
"count": 3,
"self": 0.34010638100039614
}
}
},
"_update_policy": {
"total": 914.4725091520002,
"count": 694,
"self": 344.47765280601834,
"children": {
"TorchPPOOptimizer.update": {
"total": 569.9948563459818,
"count": 34200,
"self": 569.9948563459818
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.950002327561378e-07,
"count": 1,
"self": 7.950002327561378e-07
},
"TrainerController._save_models": {
"total": 0.08399398099936661,
"count": 1,
"self": 0.0013888119992770953,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08260516900008952,
"count": 1,
"self": 0.08260516900008952
}
}
}
}
}
}
}