ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4088613986968994,
"min": 1.4088613986968994,
"max": 1.42890202999115,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69706.234375,
"min": 68588.7109375,
"max": 76706.0234375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.65587734241907,
"min": 78.88870967741936,
"max": 402.088,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49106.0,
"min": 48911.0,
"max": 50261.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999987.0,
"min": 49696.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999987.0,
"min": 49696.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.412590742111206,
"min": -0.001070452737621963,
"max": 2.450330972671509,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1416.1907958984375,
"min": -0.13273614645004272,
"max": 1483.0888671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7433942005012875,
"min": 1.8023117237514066,
"max": 3.9808995340888207,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2197.372395694256,
"min": 223.4866537451744,
"max": 2362.2698134183884,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7433942005012875,
"min": 1.8023117237514066,
"max": 3.9808995340888207,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2197.372395694256,
"min": 223.4866537451744,
"max": 2362.2698134183884,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016720450219549498,
"min": 0.013718927883504269,
"max": 0.02127690303216999,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05016135065864849,
"min": 0.027437855767008537,
"max": 0.05908297306547562,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.061391010300980674,
"min": 0.023320291377604007,
"max": 0.061391010300980674,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18417303090294201,
"min": 0.046640582755208014,
"max": 0.18417303090294201,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5998988000666644e-06,
"min": 3.5998988000666644e-06,
"max": 0.000295340476553175,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0799696400199993e-05,
"min": 1.0799696400199993e-05,
"max": 0.00084414946861685,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10119993333333333,
"min": 0.10119993333333333,
"max": 0.19844682500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035998,
"min": 0.20752820000000002,
"max": 0.58138315,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.98766733333333e-05,
"min": 6.98766733333333e-05,
"max": 0.0049224965675,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002096300199999999,
"min": 0.0002096300199999999,
"max": 0.014071019184999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692094433",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692096814"
},
"total": 2380.85135177,
"count": 1,
"self": 0.7607726040000671,
"children": {
"run_training.setup": {
"total": 0.04233614600002511,
"count": 1,
"self": 0.04233614600002511
},
"TrainerController.start_learning": {
"total": 2380.04824302,
"count": 1,
"self": 4.202821396966101,
"children": {
"TrainerController._reset_env": {
"total": 4.115225146000057,
"count": 1,
"self": 4.115225146000057
},
"TrainerController.advance": {
"total": 2371.5518494660346,
"count": 232378,
"self": 4.411337404020742,
"children": {
"env_step": {
"total": 1826.6234119399774,
"count": 232378,
"self": 1543.362116950818,
"children": {
"SubprocessEnvManager._take_step": {
"total": 280.49006645115753,
"count": 232378,
"self": 16.6406435052819,
"children": {
"TorchPolicy.evaluate": {
"total": 263.84942294587563,
"count": 222882,
"self": 263.84942294587563
}
}
},
"workers": {
"total": 2.7712285380017647,
"count": 232378,
"self": 0.0,
"children": {
"worker_root": {
"total": 2372.6955459539654,
"count": 232378,
"is_parallel": true,
"self": 1112.1983071258662,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008800589999964359,
"count": 1,
"is_parallel": true,
"self": 0.00024046899989116355,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006395900001052723,
"count": 2,
"is_parallel": true,
"self": 0.0006395900001052723
}
}
},
"UnityEnvironment.step": {
"total": 0.028346284999997806,
"count": 1,
"is_parallel": true,
"self": 0.00035227900013978797,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021714799993333145,
"count": 1,
"is_parallel": true,
"self": 0.00021714799993333145
},
"communicator.exchange": {
"total": 0.027033058000029087,
"count": 1,
"is_parallel": true,
"self": 0.027033058000029087
},
"steps_from_proto": {
"total": 0.0007437999998956002,
"count": 1,
"is_parallel": true,
"self": 0.00020314299990786822,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000540656999987732,
"count": 2,
"is_parallel": true,
"self": 0.000540656999987732
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1260.4972388280992,
"count": 232377,
"is_parallel": true,
"self": 39.67354965902882,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.36679362898212,
"count": 232377,
"is_parallel": true,
"self": 79.36679362898212
},
"communicator.exchange": {
"total": 1045.711774872986,
"count": 232377,
"is_parallel": true,
"self": 1045.711774872986
},
"steps_from_proto": {
"total": 95.74512066710213,
"count": 232377,
"is_parallel": true,
"self": 33.66841799999338,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.07670266710875,
"count": 464754,
"is_parallel": true,
"self": 62.07670266710875
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 540.5171001220363,
"count": 232378,
"self": 6.297236983086236,
"children": {
"process_trajectory": {
"total": 136.90357280795024,
"count": 232378,
"self": 135.45234413194999,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4512286760002553,
"count": 10,
"self": 1.4512286760002553
}
}
},
"_update_policy": {
"total": 397.3162903309998,
"count": 97,
"self": 337.42805172499686,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.888238606002915,
"count": 2910,
"self": 59.888238606002915
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3289995877130423e-06,
"count": 1,
"self": 1.3289995877130423e-06
},
"TrainerController._save_models": {
"total": 0.17834568199987189,
"count": 1,
"self": 0.0029280479998305964,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1754176340000413,
"count": 1,
"self": 0.1754176340000413
}
}
}
}
}
}
}
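A minimal sketch, assuming the JSON above is saved locally as run_logs/timers.json, of how the training gauges and the hierarchical timer tree can be inspected with Python's standard json module (the key paths follow the structure shown above):

```python
import json

# Load the ML-Agents run log; the local path is an assumption.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Final/min/max values of the training gauges (entropy, cumulative reward, losses, ...).
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']}, min={gauge['min']}, max={gauge['max']}")

# Rough breakdown of where wall-clock time went during the run.
advance = timers["children"]["TrainerController.start_learning"]["children"]["TrainerController.advance"]
env_step = advance["children"]["env_step"]["total"]
trainer_advance = advance["children"]["trainer_advance"]["total"]
total = timers["total"]
print(f"env_step:        {env_step / total:.1%} of {total:.0f} s")
print(f"trainer_advance: {trainer_advance / total:.1%} of {total:.0f} s")
```

With the numbers recorded above, env_step (dominated by communicator.exchange with the Unity executable) accounts for roughly three quarters of the ~2381 s run, while trainer_advance (trajectory processing and PPO updates) accounts for most of the rest.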