{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4025185108184814,
"min": 1.4025185108184814,
"max": 1.4251314401626587,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69573.3359375,
"min": 68726.3359375,
"max": 76737.1015625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.07207207207207,
"min": 81.36333333333333,
"max": 393.06299212598424,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49435.0,
"min": 48818.0,
"max": 50023.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999957.0,
"min": 49816.0,
"max": 1999957.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999957.0,
"min": 49816.0,
"max": 1999957.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.491619825363159,
"min": 0.2265482097864151,
"max": 2.4945766925811768,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1382.8489990234375,
"min": 28.545074462890625,
"max": 1467.5557861328125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8764313474431766,
"min": 1.9059532355694544,
"max": 4.010094936735727,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2151.419397830963,
"min": 240.15010768175125,
"max": 2373.97620254755,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8764313474431766,
"min": 1.9059532355694544,
"max": 4.010094936735727,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2151.419397830963,
"min": 240.15010768175125,
"max": 2373.97620254755,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01941072404410483,
"min": 0.013241641582377875,
"max": 0.020212581800539434,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05823217213231449,
"min": 0.029064337518502723,
"max": 0.05823217213231449,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05435991912252373,
"min": 0.021483722856889167,
"max": 0.06534912292328145,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1630797573675712,
"min": 0.042967445713778335,
"max": 0.19604736876984435,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6442987852666643e-06,
"min": 3.6442987852666643e-06,
"max": 0.00029538960153679993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0932896355799993e-05,
"min": 1.0932896355799993e-05,
"max": 0.0008443566185478,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10121473333333335,
"min": 0.10121473333333335,
"max": 0.1984632000000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30364420000000003,
"min": 0.20761495,
"max": 0.5814522,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.06151933333333e-05,
"min": 7.06151933333333e-05,
"max": 0.00492331368,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002118455799999999,
"min": 0.0002118455799999999,
"max": 0.014074464779999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670784507",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670786822"
},
"total": 2314.90661954,
"count": 1,
"self": 0.3908458910000263,
"children": {
"run_training.setup": {
"total": 0.10941405800008397,
"count": 1,
"self": 0.10941405800008397
},
"TrainerController.start_learning": {
"total": 2314.406359591,
"count": 1,
"self": 4.1061630219760445,
"children": {
"TrainerController._reset_env": {
"total": 10.14317178400006,
"count": 1,
"self": 10.14317178400006
},
"TrainerController.advance": {
"total": 2300.0348902600235,
"count": 232401,
"self": 4.4026291540017155,
"children": {
"env_step": {
"total": 1816.922071210969,
"count": 232401,
"self": 1524.367961413847,
"children": {
"SubprocessEnvManager._take_step": {
"total": 289.82034404704655,
"count": 232401,
"self": 15.4617666560805,
"children": {
"TorchPolicy.evaluate": {
"total": 274.35857739096605,
"count": 222887,
"self": 68.48895671693344,
"children": {
"TorchPolicy.sample_actions": {
"total": 205.8696206740326,
"count": 222887,
"self": 205.8696206740326
}
}
}
}
},
"workers": {
"total": 2.733765750075463,
"count": 232401,
"self": 0.0,
"children": {
"worker_root": {
"total": 2306.334204843104,
"count": 232401,
"is_parallel": true,
"self": 1055.543143033998,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001981913000008717,
"count": 1,
"is_parallel": true,
"self": 0.00033416300016142486,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016477499998472922,
"count": 2,
"is_parallel": true,
"self": 0.0016477499998472922
}
}
},
"UnityEnvironment.step": {
"total": 0.03184128299994882,
"count": 1,
"is_parallel": true,
"self": 0.00032279399988510704,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001942950000284327,
"count": 1,
"is_parallel": true,
"self": 0.0001942950000284327
},
"communicator.exchange": {
"total": 0.02863610300005348,
"count": 1,
"is_parallel": true,
"self": 0.02863610300005348
},
"steps_from_proto": {
"total": 0.002688090999981796,
"count": 1,
"is_parallel": true,
"self": 0.002206445999945572,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004816450000362238,
"count": 2,
"is_parallel": true,
"self": 0.0004816450000362238
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1250.7910618091062,
"count": 232400,
"is_parallel": true,
"self": 35.55599757404116,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.48772056205212,
"count": 232400,
"is_parallel": true,
"self": 81.48772056205212
},
"communicator.exchange": {
"total": 1036.2140927970486,
"count": 232400,
"is_parallel": true,
"self": 1036.2140927970486
},
"steps_from_proto": {
"total": 97.53325087596431,
"count": 232400,
"is_parallel": true,
"self": 43.0889224557186,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.44432842024571,
"count": 464800,
"is_parallel": true,
"self": 54.44432842024571
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 478.71018989505274,
"count": 232401,
"self": 6.089587530122344,
"children": {
"process_trajectory": {
"total": 153.2754166459306,
"count": 232401,
"self": 152.771036728931,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5043799169995964,
"count": 4,
"self": 0.5043799169995964
}
}
},
"_update_policy": {
"total": 319.3451857189998,
"count": 97,
"self": 265.53164720101927,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.81353851798053,
"count": 2910,
"self": 53.81353851798053
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1700003597070463e-06,
"count": 1,
"self": 1.1700003597070463e-06
},
"TrainerController._save_models": {
"total": 0.1221333550001873,
"count": 1,
"self": 0.002050009999948088,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1200833450002392,
"count": 1,
"self": 0.1200833450002392
}
}
}
}
}
}
}