ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4080438613891602,
"min": 1.4080438613891602,
"max": 1.4280880689620972,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69501.046875,
"min": 68931.828125,
"max": 76610.859375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.61872909698997,
"min": 76.70353302611367,
"max": 386.86821705426354,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49406.0,
"min": 48811.0,
"max": 50245.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999963.0,
"min": 49555.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999963.0,
"min": 49555.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4251930713653564,
"min": 0.07070478796958923,
"max": 2.496614933013916,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1450.2655029296875,
"min": 9.050212860107422,
"max": 1562.955322265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8352665524418934,
"min": 1.7730165781686082,
"max": 4.027887253071133,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2293.4893983602524,
"min": 226.94612200558186,
"max": 2470.3146238327026,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8352665524418934,
"min": 1.7730165781686082,
"max": 4.027887253071133,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2293.4893983602524,
"min": 226.94612200558186,
"max": 2470.3146238327026,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01695456433869064,
"min": 0.01428001681973304,
"max": 0.02014538683676316,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05086369301607192,
"min": 0.02856003363946608,
"max": 0.05907273976821064,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.057351839914917956,
"min": 0.022666173490385216,
"max": 0.063697347127729,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17205551974475386,
"min": 0.04533234698077043,
"max": 0.19109204138318697,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.69324876895e-06,
"min": 3.69324876895e-06,
"max": 0.0002953419015527001,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.107974630685e-05,
"min": 1.107974630685e-05,
"max": 0.0008441022186325999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10123105,
"min": 0.10123105,
"max": 0.19844729999999994,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30369315,
"min": 0.20762185000000005,
"max": 0.5813674000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.142939500000003e-05,
"min": 7.142939500000003e-05,
"max": 0.00492252027,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021428818500000008,
"min": 0.00021428818500000008,
"max": 0.014070233260000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695270919",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695272809"
},
"total": 1889.8145914180002,
"count": 1,
"self": 0.2746845240001221,
"children": {
"run_training.setup": {
"total": 0.03951431700011199,
"count": 1,
"self": 0.03951431700011199
},
"TrainerController.start_learning": {
"total": 1889.500392577,
"count": 1,
"self": 3.7120955271525418,
"children": {
"TrainerController._reset_env": {
"total": 4.100773897999943,
"count": 1,
"self": 4.100773897999943
},
"TrainerController.advance": {
"total": 1881.5797602008474,
"count": 233017,
"self": 3.862859777884978,
"children": {
"env_step": {
"total": 1433.398715697882,
"count": 233017,
"self": 1171.1640152088,
"children": {
"SubprocessEnvManager._take_step": {
"total": 259.7258223490419,
"count": 233017,
"self": 14.18268402699573,
"children": {
"TorchPolicy.evaluate": {
"total": 245.54313832204616,
"count": 222953,
"self": 245.54313832204616
}
}
},
"workers": {
"total": 2.5088781400399967,
"count": 233017,
"self": 0.0,
"children": {
"worker_root": {
"total": 1882.5696959400423,
"count": 233017,
"is_parallel": true,
"self": 935.1846920199546,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007270410001183336,
"count": 1,
"is_parallel": true,
"self": 0.00019888900033038226,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005281519997879514,
"count": 2,
"is_parallel": true,
"self": 0.0005281519997879514
}
}
},
"UnityEnvironment.step": {
"total": 0.01982505000000856,
"count": 1,
"is_parallel": true,
"self": 0.0002469840003413992,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019172999986949435,
"count": 1,
"is_parallel": true,
"self": 0.00019172999986949435
},
"communicator.exchange": {
"total": 0.018886543000007805,
"count": 1,
"is_parallel": true,
"self": 0.018886543000007805
},
"steps_from_proto": {
"total": 0.0004997929997898609,
"count": 1,
"is_parallel": true,
"self": 0.00013625499968838994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00036353800010147097,
"count": 2,
"is_parallel": true,
"self": 0.00036353800010147097
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 947.3850039200877,
"count": 233016,
"is_parallel": true,
"self": 26.34514284308443,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 47.66649855895071,
"count": 233016,
"is_parallel": true,
"self": 47.66649855895071
},
"communicator.exchange": {
"total": 810.9804278980116,
"count": 233016,
"is_parallel": true,
"self": 810.9804278980116
},
"steps_from_proto": {
"total": 62.39293462004093,
"count": 233016,
"is_parallel": true,
"self": 21.931173141024146,
"children": {
"_process_rank_one_or_two_observation": {
"total": 40.461761479016786,
"count": 466032,
"is_parallel": true,
"self": 40.461761479016786
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 444.3181847250805,
"count": 233017,
"self": 5.778896599971631,
"children": {
"process_trajectory": {
"total": 117.92776378510985,
"count": 233017,
"self": 116.80104715611014,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1267166289997022,
"count": 10,
"self": 1.1267166289997022
}
}
},
"_update_policy": {
"total": 320.61152433999905,
"count": 97,
"self": 276.3347585310064,
"children": {
"TorchPPOOptimizer.update": {
"total": 44.27676580899265,
"count": 2910,
"self": 44.27676580899265
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.809999260643963e-07,
"count": 1,
"self": 7.809999260643963e-07
},
"TrainerController._save_models": {
"total": 0.10776217000011457,
"count": 1,
"self": 0.0017397860001437948,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10602238399997077,
"count": 1,
"self": 0.10602238399997077
}
}
}
}
}
}