{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.49036848545074463,
"min": 0.486410915851593,
"max": 1.4026033878326416,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14687.5166015625,
"min": 14561.197265625,
"max": 42549.375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4223002791404724,
"min": -0.11120632290840149,
"max": 0.4663859009742737,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 111.90957641601562,
"min": -26.689517974853516,
"max": 125.9241943359375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.029169755056500435,
"min": -0.04761064052581787,
"max": 0.5099064707756042,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -7.729985237121582,
"min": -12.99770450592041,
"max": 120.84783935546875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06943094012703369,
"min": 0.06517950096029035,
"max": 0.07293299871511323,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0414641019055053,
"min": 0.46201161289455145,
"max": 1.0462445375240206,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015245787126108778,
"min": 0.0003966667606918153,
"max": 0.015245787126108778,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22868680689163168,
"min": 0.005553334649685414,
"max": 0.22868680689163168,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.53697748770667e-06,
"min": 7.53697748770667e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011305466231560005,
"min": 0.00011305466231560005,
"max": 0.0035073248308918,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251229333333332,
"min": 0.10251229333333332,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376843999999998,
"min": 1.3691136000000002,
"max": 2.5691081999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002609781040000001,
"min": 0.0002609781040000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003914671560000002,
"min": 0.003914671560000002,
"max": 0.11693390918000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011221312917768955,
"min": 0.011221312917768955,
"max": 0.4665204584598541,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1683196872472763,
"min": 0.15938179194927216,
"max": 3.2656431198120117,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 426.4782608695652,
"min": 399.64473684210526,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29427.0,
"min": 15984.0,
"max": 33142.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5155304109920626,
"min": -1.0000000521540642,
"max": 1.5155304109920626,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 104.57159835845232,
"min": -32.000001668930054,
"max": 108.96599830687046,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5155304109920626,
"min": -1.0000000521540642,
"max": 1.5155304109920626,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 104.57159835845232,
"min": -32.000001668930054,
"max": 108.96599830687046,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04952129925009828,
"min": 0.048265587657612435,
"max": 9.488236006349325,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4169696482567815,
"min": 3.4169696482567815,
"max": 151.8117761015892,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1660735750",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1660737898"
},
"total": 2148.161661367,
"count": 1,
"self": 0.47758982400000605,
"children": {
"run_training.setup": {
"total": 0.0425828500000307,
"count": 1,
"self": 0.0425828500000307
},
"TrainerController.start_learning": {
"total": 2147.6414886929997,
"count": 1,
"self": 1.631994849989951,
"children": {
"TrainerController._reset_env": {
"total": 10.765427003000013,
"count": 1,
"self": 10.765427003000013
},
"TrainerController.advance": {
"total": 2135.142926334009,
"count": 63519,
"self": 1.7786469871093686,
"children": {
"env_step": {
"total": 1399.9591235809191,
"count": 63519,
"self": 1279.1165842338612,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.97922112404171,
"count": 63519,
"self": 5.118945442051995,
"children": {
"TorchPolicy.evaluate": {
"total": 114.86027568198972,
"count": 62552,
"self": 38.07204256300179,
"children": {
"TorchPolicy.sample_actions": {
"total": 76.78823311898793,
"count": 62552,
"self": 76.78823311898793
}
}
}
}
},
"workers": {
"total": 0.863318223016222,
"count": 63519,
"self": 0.0,
"children": {
"worker_root": {
"total": 2143.273093531988,
"count": 63519,
"is_parallel": true,
"self": 979.0778995569606,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005019514999958119,
"count": 1,
"is_parallel": true,
"self": 0.0037374869997393034,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012820280002188156,
"count": 8,
"is_parallel": true,
"self": 0.0012820280002188156
}
}
},
"UnityEnvironment.step": {
"total": 0.05164784700002656,
"count": 1,
"is_parallel": true,
"self": 0.0005350100001351166,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004698679999819433,
"count": 1,
"is_parallel": true,
"self": 0.0004698679999819433
},
"communicator.exchange": {
"total": 0.04866606299992782,
"count": 1,
"is_parallel": true,
"self": 0.04866606299992782
},
"steps_from_proto": {
"total": 0.001976905999981682,
"count": 1,
"is_parallel": true,
"self": 0.0004949650001435657,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014819409998381161,
"count": 8,
"is_parallel": true,
"self": 0.0014819409998381161
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1164.1951939750272,
"count": 63518,
"is_parallel": true,
"self": 28.733637617027625,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.1037943350384,
"count": 63518,
"is_parallel": true,
"self": 26.1037943350384
},
"communicator.exchange": {
"total": 1007.6158848889719,
"count": 63518,
"is_parallel": true,
"self": 1007.6158848889719
},
"steps_from_proto": {
"total": 101.7418771339893,
"count": 63518,
"is_parallel": true,
"self": 25.751826282848356,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.99005085114095,
"count": 508144,
"is_parallel": true,
"self": 75.99005085114095
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 733.4051557659807,
"count": 63519,
"self": 2.9503203970115237,
"children": {
"process_trajectory": {
"total": 175.42409041196902,
"count": 63519,
"self": 175.21349838296896,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21059202900005403,
"count": 2,
"self": 0.21059202900005403
}
}
},
"_update_policy": {
"total": 555.0307449570001,
"count": 444,
"self": 220.0455289239943,
"children": {
"TorchPPOOptimizer.update": {
"total": 334.9852160330058,
"count": 22785,
"self": 334.9852160330058
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2530003914434928e-06,
"count": 1,
"self": 1.2530003914434928e-06
},
"TrainerController._save_models": {
"total": 0.10113925299992843,
"count": 1,
"self": 0.0018365819996688515,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09930267100025958,
"count": 1,
"self": 0.09930267100025958
}
}
}
}
}
}
}