{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4032162427902222,
"min": 1.4032162427902222,
"max": 1.429914951324463,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70424.6171875,
"min": 68777.703125,
"max": 76907.1640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 111.56888888888889,
"min": 101.34349593495935,
"max": 406.349593495935,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50206.0,
"min": 48966.0,
"max": 50206.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999908.0,
"min": 49849.0,
"max": 1999908.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999908.0,
"min": 49849.0,
"max": 1999908.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3431763648986816,
"min": 0.11437373608350754,
"max": 2.395718574523926,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1054.4293212890625,
"min": 13.953596115112305,
"max": 1160.463623046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.4492892079883153,
"min": 1.8362670323399246,
"max": 3.7080619692552514,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1552.1801435947418,
"min": 224.0245779454708,
"max": 1768.745559334755,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.4492892079883153,
"min": 1.8362670323399246,
"max": 3.7080619692552514,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1552.1801435947418,
"min": 224.0245779454708,
"max": 1768.745559334755,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018615519227144738,
"min": 0.01343886714882198,
"max": 0.019678219385514242,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.037231038454289475,
"min": 0.02687773429764396,
"max": 0.05717192582002705,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05392906082173189,
"min": 0.02141245361417532,
"max": 0.07404458137849967,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10785812164346378,
"min": 0.04282490722835064,
"max": 0.21095143978794414,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.178423607225005e-06,
"min": 4.178423607225005e-06,
"max": 0.00029535705154764994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.35684721445001e-06,
"min": 8.35684721445001e-06,
"max": 0.00084427126857625,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10139277500000003,
"min": 0.10139277500000003,
"max": 0.19845234999999994,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20278555000000006,
"min": 0.20278555000000006,
"max": 0.58142375,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.949947250000008e-05,
"min": 7.949947250000008e-05,
"max": 0.004922772265,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00015899894500000017,
"min": 0.00015899894500000017,
"max": 0.014073045125,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670806101",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670808362"
},
"total": 2261.823518115,
"count": 1,
"self": 0.4372362780000003,
"children": {
"run_training.setup": {
"total": 0.10793290400010846,
"count": 1,
"self": 0.10793290400010846
},
"TrainerController.start_learning": {
"total": 2261.278348933,
"count": 1,
"self": 3.9863323299123294,
"children": {
"TrainerController._reset_env": {
"total": 10.44704694799998,
"count": 1,
"self": 10.44704694799998
},
"TrainerController.advance": {
"total": 2246.722608846088,
"count": 230566,
"self": 4.366591080988201,
"children": {
"env_step": {
"total": 1783.6455616420546,
"count": 230566,
"self": 1493.8266709491759,
"children": {
"SubprocessEnvManager._take_step": {
"total": 287.041940917911,
"count": 230566,
"self": 15.096436973807613,
"children": {
"TorchPolicy.evaluate": {
"total": 271.9455039441034,
"count": 222986,
"self": 69.59770601612081,
"children": {
"TorchPolicy.sample_actions": {
"total": 202.34779792798258,
"count": 222986,
"self": 202.34779792798258
}
}
}
}
},
"workers": {
"total": 2.776949774967761,
"count": 230566,
"self": 0.0,
"children": {
"worker_root": {
"total": 2252.988584664042,
"count": 230566,
"is_parallel": true,
"self": 1027.7381725589278,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001953949000153443,
"count": 1,
"is_parallel": true,
"self": 0.00032261999990623735,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016313290002472058,
"count": 2,
"is_parallel": true,
"self": 0.0016313290002472058
}
}
},
"UnityEnvironment.step": {
"total": 0.03219797400015523,
"count": 1,
"is_parallel": true,
"self": 0.0002192260003539559,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001997349997964193,
"count": 1,
"is_parallel": true,
"self": 0.0001997349997964193
},
"communicator.exchange": {
"total": 0.031323040999950535,
"count": 1,
"is_parallel": true,
"self": 0.031323040999950535
},
"steps_from_proto": {
"total": 0.0004559720000543166,
"count": 1,
"is_parallel": true,
"self": 0.0001639800000248215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0002919920000294951,
"count": 2,
"is_parallel": true,
"self": 0.0002919920000294951
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1225.250412105114,
"count": 230565,
"is_parallel": true,
"self": 35.296823857001755,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.78852929209756,
"count": 230565,
"is_parallel": true,
"self": 76.78852929209756
},
"communicator.exchange": {
"total": 1017.9298173519903,
"count": 230565,
"is_parallel": true,
"self": 1017.9298173519903
},
"steps_from_proto": {
"total": 95.2352416040244,
"count": 230565,
"is_parallel": true,
"self": 38.641687700939656,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.59355390308474,
"count": 461130,
"is_parallel": true,
"self": 56.59355390308474
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 458.71045612304533,
"count": 230566,
"self": 6.832291306156776,
"children": {
"process_trajectory": {
"total": 141.02452218388794,
"count": 230566,
"self": 140.5394461268877,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48507605700024214,
"count": 4,
"self": 0.48507605700024214
}
}
},
"_update_policy": {
"total": 310.8536426330006,
"count": 96,
"self": 257.12204278699664,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.731599846003974,
"count": 2880,
"self": 53.731599846003974
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.199999683711212e-07,
"count": 1,
"self": 8.199999683711212e-07
},
"TrainerController._save_models": {
"total": 0.12235998899996048,
"count": 1,
"self": 0.0026767050003400072,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11968328399962047,
"count": 1,
"self": 0.11968328399962047
}
}
}
}
}
}
}