ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3996119499206543,
"min": 1.3996119499206543,
"max": 1.4235711097717285,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70923.9375,
"min": 68790.875,
"max": 78139.2421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.98260869565217,
"min": 83.88682432432432,
"max": 395.171875,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50015.0,
"min": 48977.0,
"max": 50582.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999877.0,
"min": 49960.0,
"max": 1999877.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999877.0,
"min": 49960.0,
"max": 1999877.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.432559013366699,
"min": -0.009346923790872097,
"max": 2.473235845565796,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1398.721435546875,
"min": -1.1870592832565308,
"max": 1446.843017578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.809989967035211,
"min": 1.7068521691354241,
"max": 3.957794803009887,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2190.744231045246,
"min": 216.77022548019886,
"max": 2270.387732923031,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.809989967035211,
"min": 1.7068521691354241,
"max": 3.957794803009887,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2190.744231045246,
"min": 216.77022548019886,
"max": 2270.387732923031,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01674877669138368,
"min": 0.013889118964774145,
"max": 0.019608087521111078,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05024633007415104,
"min": 0.02777823792954829,
"max": 0.05882426256333323,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05733967601425118,
"min": 0.023296201353271803,
"max": 0.06173272132873534,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17201902804275354,
"min": 0.046592402706543606,
"max": 0.18519816398620603,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3731488756499994e-06,
"min": 3.3731488756499994e-06,
"max": 0.00029536620154459994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0119446626949999e-05,
"min": 1.0119446626949999e-05,
"max": 0.000844239018587,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10112434999999999,
"min": 0.10112434999999999,
"max": 0.1984554,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30337305,
"min": 0.20738279999999998,
"max": 0.581413,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.610506499999998e-05,
"min": 6.610506499999998e-05,
"max": 0.00492292446,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019831519499999994,
"min": 0.00019831519499999994,
"max": 0.014072508699999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698015483",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698017990"
},
"total": 2506.4956202579997,
"count": 1,
"self": 0.7994484279997778,
"children": {
"run_training.setup": {
"total": 0.052262064999922586,
"count": 1,
"self": 0.052262064999922586
},
"TrainerController.start_learning": {
"total": 2505.643909765,
"count": 1,
"self": 4.5122975149770355,
"children": {
"TrainerController._reset_env": {
"total": 8.71720808100008,
"count": 1,
"self": 8.71720808100008
},
"TrainerController.advance": {
"total": 2492.263361407023,
"count": 232126,
"self": 4.900094076095684,
"children": {
"env_step": {
"total": 1982.269238343968,
"count": 232126,
"self": 1630.3100237819988,
"children": {
"SubprocessEnvManager._take_step": {
"total": 348.99904081200657,
"count": 232126,
"self": 16.681664509948178,
"children": {
"TorchPolicy.evaluate": {
"total": 332.3173763020584,
"count": 222958,
"self": 332.3173763020584
}
}
},
"workers": {
"total": 2.960173749962678,
"count": 232126,
"self": 0.0,
"children": {
"worker_root": {
"total": 2497.729734196118,
"count": 232126,
"is_parallel": true,
"self": 1174.6050972110743,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008886849998361868,
"count": 1,
"is_parallel": true,
"self": 0.00027928899999096757,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006093959998452192,
"count": 2,
"is_parallel": true,
"self": 0.0006093959998452192
}
}
},
"UnityEnvironment.step": {
"total": 0.03175039900020238,
"count": 1,
"is_parallel": true,
"self": 0.0003200810001544596,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019542500012903474,
"count": 1,
"is_parallel": true,
"self": 0.00019542500012903474
},
"communicator.exchange": {
"total": 0.030524907999961215,
"count": 1,
"is_parallel": true,
"self": 0.030524907999961215
},
"steps_from_proto": {
"total": 0.0007099849999576691,
"count": 1,
"is_parallel": true,
"self": 0.00019475499993859557,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005152300000190735,
"count": 2,
"is_parallel": true,
"self": 0.0005152300000190735
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1323.1246369850437,
"count": 232125,
"is_parallel": true,
"self": 41.38498195803163,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.38415392695265,
"count": 232125,
"is_parallel": true,
"self": 89.38415392695265
},
"communicator.exchange": {
"total": 1098.8032133279207,
"count": 232125,
"is_parallel": true,
"self": 1098.8032133279207
},
"steps_from_proto": {
"total": 93.5522877721387,
"count": 232125,
"is_parallel": true,
"self": 35.26442329604242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.287864476096274,
"count": 464250,
"is_parallel": true,
"self": 58.287864476096274
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 505.0940289869591,
"count": 232126,
"self": 6.89999527511759,
"children": {
"process_trajectory": {
"total": 159.2084057698421,
"count": 232126,
"self": 158.01372094384215,
"children": {
"RLTrainer._checkpoint": {
"total": 1.194684825999957,
"count": 10,
"self": 1.194684825999957
}
}
},
"_update_policy": {
"total": 338.9856279419994,
"count": 97,
"self": 276.5922843749986,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.393343567000784,
"count": 2910,
"self": 62.393343567000784
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3000003491470125e-06,
"count": 1,
"self": 1.3000003491470125e-06
},
"TrainerController._save_models": {
"total": 0.1510414619997391,
"count": 1,
"self": 0.0029737919994659023,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1480676700002732,
"count": 1,
"self": 0.1480676700002732
}
}
}
}
}
}
}
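
A minimal sketch (not part of the run logs) of how this dump could be inspected with Python's standard library, assuming it is read from the run_logs/timers.json path shown above; each gauge entry carries value/min/max/count, and the timer tree nests total/count/self/children.

import json

# Load the ML-Agents timer dump (path assumed from the repo layout above).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the last value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} min={gauge['min']:.4f} max={gauge['max']:.4f}")

# The timer tree nests total/count/self/children; walk it to see where wall-clock time went.
def walk(node, label="root", depth=0):
    print(f"{'  ' * depth}{label}: {node.get('total', 0.0):.1f}s over {node.get('count', 0)} call(s)")
    for child_label, child in node.get("children", {}).items():
        walk(child, child_label, depth + 1)

walk(timers)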