{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4034790992736816,
"min": 1.403475284576416,
"max": 1.428091049194336,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72816.703125,
"min": 68892.1640625,
"max": 78284.53125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 98.07722772277228,
"min": 89.5163043478261,
"max": 400.016,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49529.0,
"min": 48780.0,
"max": 50034.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999937.0,
"min": 49961.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999937.0,
"min": 49961.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3989076614379883,
"min": 0.08706027269363403,
"max": 2.435908317565918,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1211.4483642578125,
"min": 10.7954740524292,
"max": 1343.818359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.729642083503232,
"min": 1.8740369509304724,
"max": 3.886729035191907,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1883.4692521691322,
"min": 232.38058191537857,
"max": 2109.892471551895,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.729642083503232,
"min": 1.8740369509304724,
"max": 3.886729035191907,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1883.4692521691322,
"min": 232.38058191537857,
"max": 2109.892471551895,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016339236457133668,
"min": 0.01323441070174643,
"max": 0.020270675991196183,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.049017709371401,
"min": 0.02646882140349286,
"max": 0.05559710460171725,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05033494068516625,
"min": 0.022826730273663998,
"max": 0.05578981993926896,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15100482205549876,
"min": 0.046946378052234644,
"max": 0.16736945981780688,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5049988316999995e-06,
"min": 3.5049988316999995e-06,
"max": 0.00029536117654627495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0514996495099998e-05,
"min": 1.0514996495099998e-05,
"max": 0.0008442904685698497,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116830000000003,
"min": 0.10116830000000003,
"max": 0.19845372500000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035049000000001,
"min": 0.20750875000000002,
"max": 0.5814301500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.829816999999999e-05,
"min": 6.829816999999999e-05,
"max": 0.0049228408775,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020489450999999998,
"min": 0.00020489450999999998,
"max": 0.014073364485000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673187950",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673190333"
},
"total": 2382.9936336729997,
"count": 1,
"self": 0.39726506599981803,
"children": {
"run_training.setup": {
"total": 0.11386215500010621,
"count": 1,
"self": 0.11386215500010621
},
"TrainerController.start_learning": {
"total": 2382.482506452,
"count": 1,
"self": 4.1593095559919675,
"children": {
"TrainerController._reset_env": {
"total": 9.453990227000077,
"count": 1,
"self": 9.453990227000077
},
"TrainerController.advance": {
"total": 2368.746228826008,
"count": 232053,
"self": 4.354583601992545,
"children": {
"env_step": {
"total": 1884.0529575109026,
"count": 232053,
"self": 1583.6490670579865,
"children": {
"SubprocessEnvManager._take_step": {
"total": 297.5624968259276,
"count": 232053,
"self": 15.35874086898707,
"children": {
"TorchPolicy.evaluate": {
"total": 282.20375595694054,
"count": 223111,
"self": 70.43461285483386,
"children": {
"TorchPolicy.sample_actions": {
"total": 211.76914310210668,
"count": 223111,
"self": 211.76914310210668
}
}
}
}
},
"workers": {
"total": 2.84139362698852,
"count": 232053,
"self": 0.0,
"children": {
"worker_root": {
"total": 2373.8761813398914,
"count": 232053,
"is_parallel": true,
"self": 1071.0318077528734,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00223262400004387,
"count": 1,
"is_parallel": true,
"self": 0.0003404460003366694,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018921779997072008,
"count": 2,
"is_parallel": true,
"self": 0.0018921779997072008
}
}
},
"UnityEnvironment.step": {
"total": 0.03195654800015291,
"count": 1,
"is_parallel": true,
"self": 0.0003220700002657395,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002124969998931192,
"count": 1,
"is_parallel": true,
"self": 0.0002124969998931192
},
"communicator.exchange": {
"total": 0.030580949000068358,
"count": 1,
"is_parallel": true,
"self": 0.030580949000068358
},
"steps_from_proto": {
"total": 0.0008410319999256899,
"count": 1,
"is_parallel": true,
"self": 0.0002835269999650336,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005575049999606563,
"count": 2,
"is_parallel": true,
"self": 0.0005575049999606563
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1302.844373587018,
"count": 232052,
"is_parallel": true,
"self": 36.98233758020456,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.25430394195155,
"count": 232052,
"is_parallel": true,
"self": 87.25430394195155
},
"communicator.exchange": {
"total": 1078.957618301021,
"count": 232052,
"is_parallel": true,
"self": 1078.957618301021
},
"steps_from_proto": {
"total": 99.65011376384086,
"count": 232052,
"is_parallel": true,
"self": 43.5440201927986,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.106093571042265,
"count": 464104,
"is_parallel": true,
"self": 56.106093571042265
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 480.3386877131129,
"count": 232053,
"self": 6.45431777509566,
"children": {
"process_trajectory": {
"total": 155.99041409201527,
"count": 232053,
"self": 154.77499646801516,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2154176240001107,
"count": 10,
"self": 1.2154176240001107
}
}
},
"_update_policy": {
"total": 317.893955846002,
"count": 97,
"self": 264.0536346710121,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.84032117498987,
"count": 2910,
"self": 53.84032117498987
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0399999155197293e-06,
"count": 1,
"self": 1.0399999155197293e-06
},
"TrainerController._save_models": {
"total": 0.1229768029998013,
"count": 1,
"self": 0.0020476849999795377,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12092911799982176,
"count": 1,
"self": 0.12092911799982176
}
}
}
}
}
}
}