{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7718034982681274,
"min": 0.769055187702179,
"max": 1.3696790933609009,
"count": 22
},
"Pyramids.Policy.Entropy.sum": {
"value": 22919.4765625,
"min": 22919.4765625,
"max": 41550.5859375,
"count": 22
},
"Pyramids.Step.mean": {
"value": 659955.0,
"min": 29952.0,
"max": 659955.0,
"count": 22
},
"Pyramids.Step.sum": {
"value": 659955.0,
"min": 29952.0,
"max": 659955.0,
"count": 22
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3880106210708618,
"min": -0.11473724246025085,
"max": 0.3880106210708618,
"count": 22
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 102.82281494140625,
"min": -27.536937713623047,
"max": 102.82281494140625,
"count": 22
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00528643187135458,
"min": 0.00528643187135458,
"max": 0.5329198241233826,
"count": 22
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.4009044170379639,
"min": 1.4009044170379639,
"max": 126.30199432373047,
"count": 22
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0674748248683976,
"min": 0.06509932343631615,
"max": 0.07344120107119946,
"count": 22
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9446475481575665,
"min": 0.48254589916487234,
"max": 1.037305413453164,
"count": 22
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013391631856523289,
"min": 0.0008747547597472183,
"max": 0.015420653826373898,
"count": 22
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18748284599132606,
"min": 0.012246566636461056,
"max": 0.18748284599132606,
"count": 22
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00023552407863483804,
"min": 0.00023552407863483804,
"max": 0.00029838354339596195,
"count": 22
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0032973371008877326,
"min": 0.0020886848037717336,
"max": 0.0040107099630967,
"count": 22
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.17850801904761907,
"min": 0.17850801904761907,
"max": 0.19946118095238097,
"count": 22
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.4991122666666667,
"min": 1.3962282666666668,
"max": 2.7369033000000003,
"count": 22
},
"Pyramids.Policy.Beta.mean": {
"value": 0.007852951102857142,
"min": 0.007852951102857142,
"max": 0.009946171977142856,
"count": 22
},
"Pyramids.Policy.Beta.sum": {
"value": 0.10994131543999999,
"min": 0.06962320384,
"max": 0.13369663967,
"count": 22
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010041001252830029,
"min": 0.010041001252830029,
"max": 0.590167224407196,
"count": 22
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14057402312755585,
"min": 0.14057402312755585,
"max": 4.131170749664307,
"count": 22
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 468.7647058823529,
"min": 468.7647058823529,
"max": 999.0,
"count": 22
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31876.0,
"min": 15984.0,
"max": 32719.0,
"count": 22
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.266444089329418,
"min": -1.0000000521540642,
"max": 1.3186524323508388,
"count": 22
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 86.11819807440042,
"min": -28.74720162153244,
"max": 86.11819807440042,
"count": 22
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.266444089329418,
"min": -1.0000000521540642,
"max": 1.3186524323508388,
"count": 22
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 86.11819807440042,
"min": -28.74720162153244,
"max": 86.11819807440042,
"count": 22
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0486753513379132,
"min": 0.0486753513379132,
"max": 13.090168526396155,
"count": 22
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.309923890978098,
"min": 3.182563574271626,
"max": 209.44269642233849,
"count": 22
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 22
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 22
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1659982512",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1659983984"
},
"total": 1472.6413112869998,
"count": 1,
"self": 0.3613817329999165,
"children": {
"run_training.setup": {
"total": 0.17209062500000982,
"count": 1,
"self": 0.17209062500000982
},
"TrainerController.start_learning": {
"total": 1472.1078389289999,
"count": 1,
"self": 1.1836729120070686,
"children": {
"TrainerController._reset_env": {
"total": 12.174847659999955,
"count": 1,
"self": 12.174847659999955
},
"TrainerController.advance": {
"total": 1458.5779057439922,
"count": 42436,
"self": 1.2518434319845255,
"children": {
"env_step": {
"total": 922.4360542100036,
"count": 42436,
"self": 837.172228768038,
"children": {
"SubprocessEnvManager._take_step": {
"total": 84.63412457599122,
"count": 42436,
"self": 3.611470007965181,
"children": {
"TorchPolicy.evaluate": {
"total": 81.02265456802604,
"count": 41898,
"self": 26.933642801973747,
"children": {
"TorchPolicy.sample_actions": {
"total": 54.089011766052295,
"count": 41898,
"self": 54.089011766052295
}
}
}
}
},
"workers": {
"total": 0.6297008659743142,
"count": 42435,
"self": 0.0,
"children": {
"worker_root": {
"total": 1469.1872678530383,
"count": 42435,
"is_parallel": true,
"self": 712.4351316240399,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.010425950000012563,
"count": 1,
"is_parallel": true,
"self": 0.00644375400008812,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003982195999924443,
"count": 8,
"is_parallel": true,
"self": 0.003982195999924443
}
}
},
"UnityEnvironment.step": {
"total": 0.050613548000001174,
"count": 1,
"is_parallel": true,
"self": 0.0005365579999079273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005165820000456733,
"count": 1,
"is_parallel": true,
"self": 0.0005165820000456733
},
"communicator.exchange": {
"total": 0.04757401999995636,
"count": 1,
"is_parallel": true,
"self": 0.04757401999995636
},
"steps_from_proto": {
"total": 0.0019863880000912104,
"count": 1,
"is_parallel": true,
"self": 0.00046808300010070525,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015183049999905052,
"count": 8,
"is_parallel": true,
"self": 0.0015183049999905052
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 756.7521362289984,
"count": 42434,
"is_parallel": true,
"self": 21.030148722005947,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 17.69013084297376,
"count": 42434,
"is_parallel": true,
"self": 17.69013084297376
},
"communicator.exchange": {
"total": 647.3793244129985,
"count": 42434,
"is_parallel": true,
"self": 647.3793244129985
},
"steps_from_proto": {
"total": 70.65253225102026,
"count": 42434,
"is_parallel": true,
"self": 18.244157922028194,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.40837432899207,
"count": 339472,
"is_parallel": true,
"self": 52.40837432899207
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 534.8900081020042,
"count": 42435,
"self": 2.1160900819980952,
"children": {
"process_trajectory": {
"total": 119.68441864100407,
"count": 42435,
"self": 119.5698528990041,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11456574199996794,
"count": 1,
"self": 0.11456574199996794
}
}
},
"_update_policy": {
"total": 413.08949937900206,
"count": 296,
"self": 164.31402788099615,
"children": {
"TorchPPOOptimizer.update": {
"total": 248.7754714980059,
"count": 15312,
"self": 248.7754714980059
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.462000338709913e-06,
"count": 1,
"self": 2.462000338709913e-06
},
"TrainerController._save_models": {
"total": 0.17141015100014556,
"count": 1,
"self": 0.002410419000170805,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16899973199997476,
"count": 1,
"self": 0.16899973199997476
}
}
}
}
}
}
}