ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4066535234451294,
"min": 1.4066535234451294,
"max": 1.4283028841018677,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71383.4453125,
"min": 67888.8828125,
"max": 78473.3515625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.63047285464098,
"min": 78.40699523052464,
"max": 423.20338983050846,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49466.0,
"min": 48846.0,
"max": 50007.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999941.0,
"min": 49964.0,
"max": 1999941.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999941.0,
"min": 49964.0,
"max": 1999941.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4398484230041504,
"min": 0.0848558247089386,
"max": 2.5060489177703857,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1393.1534423828125,
"min": 9.928131103515625,
"max": 1505.491943359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7468909203693035,
"min": 1.745855960326317,
"max": 4.0958801593696865,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2139.4747155308723,
"min": 204.2651473581791,
"max": 2404.6900685429573,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7468909203693035,
"min": 1.745855960326317,
"max": 4.0958801593696865,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2139.4747155308723,
"min": 204.2651473581791,
"max": 2404.6900685429573,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018531714734207425,
"min": 0.014499044874618347,
"max": 0.01964454108375422,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05559514420262228,
"min": 0.029068834505354364,
"max": 0.05893362325126266,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05513675643338098,
"min": 0.02095219393571218,
"max": 0.07919476913909117,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16541026930014294,
"min": 0.04190438787142436,
"max": 0.23758430741727352,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.691148769650006e-06,
"min": 3.691148769650006e-06,
"max": 0.00029526322657892504,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1073446308950018e-05,
"min": 1.1073446308950018e-05,
"max": 0.0008438913187028998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10123034999999998,
"min": 0.10123034999999998,
"max": 0.19842107500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30369104999999996,
"min": 0.20766990000000007,
"max": 0.5812970999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.139446500000012e-05,
"min": 7.139446500000012e-05,
"max": 0.004921211642500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021418339500000035,
"min": 0.00021418339500000035,
"max": 0.01406672529,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679079196",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679081524"
},
"total": 2327.554592171,
"count": 1,
"self": 0.390679082999668,
"children": {
"run_training.setup": {
"total": 0.17915927299998202,
"count": 1,
"self": 0.17915927299998202
},
"TrainerController.start_learning": {
"total": 2326.984753815,
"count": 1,
"self": 4.280017935047908,
"children": {
"TrainerController._reset_env": {
"total": 9.020437541000007,
"count": 1,
"self": 9.020437541000007
},
"TrainerController.advance": {
"total": 2313.5690859989522,
"count": 232342,
"self": 4.584842764978021,
"children": {
"env_step": {
"total": 1806.9442595700596,
"count": 232342,
"self": 1519.0787625031423,
"children": {
"SubprocessEnvManager._take_step": {
"total": 285.07672137596046,
"count": 232342,
"self": 17.367366932025277,
"children": {
"TorchPolicy.evaluate": {
"total": 267.7093544439352,
"count": 222998,
"self": 267.7093544439352
}
}
},
"workers": {
"total": 2.788775690956811,
"count": 232342,
"self": 0.0,
"children": {
"worker_root": {
"total": 2318.931395893937,
"count": 232342,
"is_parallel": true,
"self": 1086.9051845139288,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001086928999995962,
"count": 1,
"is_parallel": true,
"self": 0.00029735999999047635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007895690000054856,
"count": 2,
"is_parallel": true,
"self": 0.0007895690000054856
}
}
},
"UnityEnvironment.step": {
"total": 0.028238348000002134,
"count": 1,
"is_parallel": true,
"self": 0.0003259629999945446,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022311300000410483,
"count": 1,
"is_parallel": true,
"self": 0.00022311300000410483
},
"communicator.exchange": {
"total": 0.02702455700000428,
"count": 1,
"is_parallel": true,
"self": 0.02702455700000428
},
"steps_from_proto": {
"total": 0.0006647149999992052,
"count": 1,
"is_parallel": true,
"self": 0.00019713099999307815,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004675840000061271,
"count": 2,
"is_parallel": true,
"self": 0.0004675840000061271
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1232.0262113800081,
"count": 232341,
"is_parallel": true,
"self": 37.78783713078565,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.86412689109812,
"count": 232341,
"is_parallel": true,
"self": 75.86412689109812
},
"communicator.exchange": {
"total": 1030.5913016481045,
"count": 232341,
"is_parallel": true,
"self": 1030.5913016481045
},
"steps_from_proto": {
"total": 87.78294571002004,
"count": 232341,
"is_parallel": true,
"self": 33.04602662788142,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.73691908213863,
"count": 464682,
"is_parallel": true,
"self": 54.73691908213863
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 502.0399836639146,
"count": 232342,
"self": 6.5851008849084,
"children": {
"process_trajectory": {
"total": 138.97733464800615,
"count": 232342,
"self": 137.67700130900573,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3003333390004173,
"count": 10,
"self": 1.3003333390004173
}
}
},
"_update_policy": {
"total": 356.47754813100005,
"count": 97,
"self": 298.17648693799725,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.301061193002795,
"count": 2910,
"self": 58.301061193002795
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3020003279962111e-06,
"count": 1,
"self": 1.3020003279962111e-06
},
"TrainerController._save_models": {
"total": 0.11521103799987031,
"count": 1,
"self": 0.0019183489998795267,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11329268899999079,
"count": 1,
"self": 0.11329268899999079
}
}
}
}
}
}
}
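
A minimal sketch of how one might load and inspect this timers.json with the Python standard library; the file path used here is an assumption, and this snippet is not part of the ML-Agents run log itself.

# Sketch: read the ML-Agents timers.json above and summarize its gauges.
# The relative path "run_logs/timers.json" is assumed, not taken from the log.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus the min/max/count observed during training.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']} "
          f"(min={gauge['min']}, max={gauge['max']}, count={gauge['count']})")

# Total wall-clock time of the run, in seconds, from the top-level timer node.
print("total seconds:", timers["total"])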