ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.409485936164856,
"min": 1.409485936164856,
"max": 1.4277399778366089,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70777.3359375,
"min": 67761.671875,
"max": 75260.625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 101.74691358024691,
"min": 88.4136690647482,
"max": 382.5530303030303,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49449.0,
"min": 48983.0,
"max": 50497.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999976.0,
"min": 49879.0,
"max": 1999976.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999976.0,
"min": 49879.0,
"max": 1999976.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3759729862213135,
"min": 0.0616707019507885,
"max": 2.4457292556762695,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1154.722900390625,
"min": 8.078862190246582,
"max": 1359.926513671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6434858729564605,
"min": 1.8409578920775698,
"max": 3.900073066772706,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1770.7341342568398,
"min": 241.16548386216164,
"max": 2119.378028512001,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6434858729564605,
"min": 1.8409578920775698,
"max": 3.900073066772706,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1770.7341342568398,
"min": 241.16548386216164,
"max": 2119.378028512001,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01530717530293057,
"min": 0.013443922079265272,
"max": 0.019799396011512725,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04592152590879171,
"min": 0.026887844158530544,
"max": 0.05645338957207666,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.048621591553092,
"min": 0.023109287892778715,
"max": 0.06388570467631022,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.145864774659276,
"min": 0.04621857578555743,
"max": 0.18084294684231284,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.465898844733329e-06,
"min": 3.465898844733329e-06,
"max": 0.00029536185154605,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0397696534199988e-05,
"min": 1.0397696534199988e-05,
"max": 0.0008442286685904499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115526666666665,
"min": 0.10115526666666665,
"max": 0.19845395000000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30346579999999995,
"min": 0.20745775000000002,
"max": 0.58140955,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.764780666666664e-05,
"min": 6.764780666666664e-05,
"max": 0.004922852105,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002029434199999999,
"min": 0.0002029434199999999,
"max": 0.014072336545000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681246954",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681249346"
},
"total": 2392.327014639,
"count": 1,
"self": 0.45548407399974167,
"children": {
"run_training.setup": {
"total": 0.11111068500008514,
"count": 1,
"self": 0.11111068500008514
},
"TrainerController.start_learning": {
"total": 2391.76041988,
"count": 1,
"self": 4.3212337959066645,
"children": {
"TrainerController._reset_env": {
"total": 4.908088823000071,
"count": 1,
"self": 4.908088823000071
},
"TrainerController.advance": {
"total": 2382.412539957093,
"count": 231930,
"self": 4.771987273222294,
"children": {
"env_step": {
"total": 1862.5439559608865,
"count": 231930,
"self": 1578.018875989957,
"children": {
"SubprocessEnvManager._take_step": {
"total": 281.662120270945,
"count": 231930,
"self": 16.57796361790588,
"children": {
"TorchPolicy.evaluate": {
"total": 265.08415665303914,
"count": 222992,
"self": 265.08415665303914
}
}
},
"workers": {
"total": 2.862959699984458,
"count": 231930,
"self": 0.0,
"children": {
"worker_root": {
"total": 2383.8631030430233,
"count": 231930,
"is_parallel": true,
"self": 1092.2170993111613,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009012489999804529,
"count": 1,
"is_parallel": true,
"self": 0.0002688619999844377,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006323869999960152,
"count": 2,
"is_parallel": true,
"self": 0.0006323869999960152
}
}
},
"UnityEnvironment.step": {
"total": 0.03819196500012367,
"count": 1,
"is_parallel": true,
"self": 0.0003020980002474971,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002696479998576251,
"count": 1,
"is_parallel": true,
"self": 0.0002696479998576251
},
"communicator.exchange": {
"total": 0.036967141000104675,
"count": 1,
"is_parallel": true,
"self": 0.036967141000104675
},
"steps_from_proto": {
"total": 0.0006530779999138758,
"count": 1,
"is_parallel": true,
"self": 0.0001867149999270623,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004663629999868135,
"count": 2,
"is_parallel": true,
"self": 0.0004663629999868135
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1291.646003731862,
"count": 231929,
"is_parallel": true,
"self": 38.42129092391224,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.75316943601979,
"count": 231929,
"is_parallel": true,
"self": 79.75316943601979
},
"communicator.exchange": {
"total": 1084.454074891953,
"count": 231929,
"is_parallel": true,
"self": 1084.454074891953
},
"steps_from_proto": {
"total": 89.01746847997697,
"count": 231929,
"is_parallel": true,
"self": 33.39342824208279,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.62404023789418,
"count": 463858,
"is_parallel": true,
"self": 55.62404023789418
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 515.096596722984,
"count": 231930,
"self": 6.5668539030116335,
"children": {
"process_trajectory": {
"total": 131.248430944971,
"count": 231930,
"self": 129.71881703797067,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5296139070003392,
"count": 10,
"self": 1.5296139070003392
}
}
},
"_update_policy": {
"total": 377.2813118750014,
"count": 97,
"self": 317.41563827501045,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.86567359999094,
"count": 2910,
"self": 59.86567359999094
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.459998414560687e-07,
"count": 1,
"self": 7.459998414560687e-07
},
"TrainerController._save_models": {
"total": 0.11855655800036402,
"count": 1,
"self": 0.0020190650002405164,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1165374930001235,
"count": 1,
"self": 0.1165374930001235
}
}
}
}
}
}
}
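
The file above has two parts: a `gauges` map, where each entry records the latest `value` plus the `min`, `max`, and `count` seen over the run, and a hierarchical timer tree rooted at `TrainerController`, where each node reports `total`, `count`, and `self` wall-clock seconds plus optional `children`. As a minimal sketch (not part of this repository), the Python below loads such a file and prints both parts; the `run_logs/timers.json` path and the printed fields are assumptions based on the structure shown above.

```python
import json

# Assumed path; adjust to wherever mlagents-learn wrote the run logs.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge tracks the latest value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# Timer blocks form a tree: every node has total/count/self seconds and
# optional children. Walk it recursively to see where wall-clock time went.
def walk(name, node, depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.1f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(timers.get("name", "root"), timers)
```

For this run, such a walk would show most of the ~2392 s of training time inside `TrainerController.advance`, split mainly between `env_step` (environment stepping and `communicator.exchange`) and `trainer_advance` (`_update_policy` / `TorchPPOOptimizer.update`).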