{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.400067687034607,
"min": 1.40004301071167,
"max": 1.4261637926101685,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70015.984375,
"min": 68150.015625,
"max": 76827.3984375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.97395833333333,
"min": 83.66440677966102,
"max": 430.56410256410254,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50097.0,
"min": 48668.0,
"max": 50376.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999979.0,
"min": 49744.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999979.0,
"min": 49744.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4270012378692627,
"min": -0.025212375447154045,
"max": 2.4784231185913086,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1397.9527587890625,
"min": -2.924635648727417,
"max": 1421.8824462890625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.826621889757613,
"min": 1.758409293946521,
"max": 3.939447262390392,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2204.1342085003853,
"min": 203.97547809779644,
"max": 2204.1342085003853,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.826621889757613,
"min": 1.758409293946521,
"max": 3.939447262390392,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2204.1342085003853,
"min": 203.97547809779644,
"max": 2204.1342085003853,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014718990639570015,
"min": 0.014202029030032766,
"max": 0.019757556546634684,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04415697191871004,
"min": 0.028404058060065532,
"max": 0.05705198349120716,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06050051620437039,
"min": 0.024652261473238467,
"max": 0.06050051620437039,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18150154861311119,
"min": 0.049304522946476935,
"max": 0.18150154861311119,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4703488432499974e-06,
"min": 3.4703488432499974e-06,
"max": 0.00029538405153864996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0411046529749992e-05,
"min": 1.0411046529749992e-05,
"max": 0.0008442444185852,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115674999999998,
"min": 0.10115674999999998,
"max": 0.19846135000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30347024999999994,
"min": 0.20743860000000003,
"max": 0.5814148,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.772182499999994e-05,
"min": 6.772182499999994e-05,
"max": 0.004923221364999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020316547499999983,
"min": 0.00020316547499999983,
"max": 0.014072598519999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676720144",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676722680"
},
"total": 2535.585510776,
"count": 1,
"self": 0.39559828299979927,
"children": {
"run_training.setup": {
"total": 0.12020942999998852,
"count": 1,
"self": 0.12020942999998852
},
"TrainerController.start_learning": {
"total": 2535.069703063,
"count": 1,
"self": 4.50542782201228,
"children": {
"TrainerController._reset_env": {
"total": 10.963274830999978,
"count": 1,
"self": 10.963274830999978
},
"TrainerController.advance": {
"total": 2519.484893178988,
"count": 232083,
"self": 4.906642081909467,
"children": {
"env_step": {
"total": 1968.977377748151,
"count": 232083,
"self": 1638.31587198325,
"children": {
"SubprocessEnvManager._take_step": {
"total": 327.6726412449687,
"count": 232083,
"self": 16.632803745979402,
"children": {
"TorchPolicy.evaluate": {
"total": 311.0398374989893,
"count": 222890,
"self": 75.82431342497108,
"children": {
"TorchPolicy.sample_actions": {
"total": 235.21552407401822,
"count": 222890,
"self": 235.21552407401822
}
}
}
}
},
"workers": {
"total": 2.988864519932406,
"count": 232083,
"self": 0.0,
"children": {
"worker_root": {
"total": 2525.8099126429593,
"count": 232083,
"is_parallel": true,
"self": 1195.7775146549072,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022723730000393516,
"count": 1,
"is_parallel": true,
"self": 0.0003518700000881836,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001920502999951168,
"count": 2,
"is_parallel": true,
"self": 0.001920502999951168
}
}
},
"UnityEnvironment.step": {
"total": 0.050184978999993746,
"count": 1,
"is_parallel": true,
"self": 0.00028831000003037843,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002096679999681328,
"count": 1,
"is_parallel": true,
"self": 0.0002096679999681328
},
"communicator.exchange": {
"total": 0.04878820800001904,
"count": 1,
"is_parallel": true,
"self": 0.04878820800001904
},
"steps_from_proto": {
"total": 0.0008987929999761946,
"count": 1,
"is_parallel": true,
"self": 0.00042558199999120916,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004732109999849854,
"count": 2,
"is_parallel": true,
"self": 0.0004732109999849854
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1330.032397988052,
"count": 232082,
"is_parallel": true,
"self": 39.67496199603397,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.37281893905333,
"count": 232082,
"is_parallel": true,
"self": 85.37281893905333
},
"communicator.exchange": {
"total": 1106.9129390900116,
"count": 232082,
"is_parallel": true,
"self": 1106.9129390900116
},
"steps_from_proto": {
"total": 98.07167796295323,
"count": 232082,
"is_parallel": true,
"self": 42.8233285749252,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.24834938802803,
"count": 464164,
"is_parallel": true,
"self": 55.24834938802803
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 545.6008733489275,
"count": 232083,
"self": 6.859150434965272,
"children": {
"process_trajectory": {
"total": 175.7907249649623,
"count": 232083,
"self": 174.44410254596283,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3466224189994591,
"count": 10,
"self": 1.3466224189994591
}
}
},
"_update_policy": {
"total": 362.95099794899994,
"count": 97,
"self": 304.8926417070028,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.05835624199716,
"count": 2910,
"self": 58.05835624199716
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.029996934055816e-07,
"count": 1,
"self": 8.029996934055816e-07
},
"TrainerController._save_models": {
"total": 0.1161064279999664,
"count": 1,
"self": 0.002284842999870307,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11382158500009609,
"count": 1,
"self": 0.11382158500009609
}
}
}
}
}
}
}