ppo-PiramidsRND/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.27946722507476807,
"min": 0.27946722507476807,
"max": 1.4223023653030396,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8495.8037109375,
"min": 8495.8037109375,
"max": 43146.96484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7026473879814148,
"min": -0.11263036727905273,
"max": 0.760591983795166,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 205.17303466796875,
"min": -27.143918991088867,
"max": 224.3746337890625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025793733075261116,
"min": 0.00900337565690279,
"max": 0.5020925402641296,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.5317702293396,
"min": 2.5209450721740723,
"max": 118.99592590332031,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0703423618155493,
"min": 0.06437795411847118,
"max": 0.07471895710060031,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9847930654176903,
"min": 0.5097872982950394,
"max": 1.0711176672633098,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015261912029583843,
"min": 0.0009527518301034166,
"max": 0.015269969657224247,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2136667684141738,
"min": 0.009766532110378118,
"max": 0.22453860809522055,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.4479883266307144e-05,
"min": 2.4479883266307144e-05,
"max": 0.0009838354301878855,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0003427183657283,
"min": 0.0003427183657283,
"max": 0.011694330230567,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244797857142858,
"min": 0.10244797857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342717,
"min": 1.3886848,
"max": 2.5694329999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025455305928571433,
"min": 0.00025455305928571433,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003563742830000001,
"min": 0.003563742830000001,
"max": 0.1169663567,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010976001620292664,
"min": 0.010976001620292664,
"max": 0.5342116355895996,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1536640226840973,
"min": 0.1536640226840973,
"max": 3.7394814491271973,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 268.4424778761062,
"min": 258.52100840336135,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30334.0,
"min": 15984.0,
"max": 33558.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7135553508996964,
"min": -1.0000000521540642,
"max": 1.7414705661915932,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 191.918199300766,
"min": -30.510001733899117,
"max": 207.23499737679958,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7135553508996964,
"min": -1.0000000521540642,
"max": 1.7414705661915932,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 191.918199300766,
"min": -30.510001733899117,
"max": 207.23499737679958,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.030496460790995376,
"min": 0.030496460790995376,
"max": 10.688495183363557,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.415603608591482,
"min": 3.415603608591482,
"max": 171.0159229338169,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706877535",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706879884"
},
"total": 2348.9017937430003,
"count": 1,
"self": 0.49208894500043243,
"children": {
"run_training.setup": {
"total": 0.05560866000001852,
"count": 1,
"self": 0.05560866000001852
},
"TrainerController.start_learning": {
"total": 2348.354096138,
"count": 1,
"self": 1.4720224579873502,
"children": {
"TrainerController._reset_env": {
"total": 3.811920057000009,
"count": 1,
"self": 3.811920057000009
},
"TrainerController.advance": {
"total": 2342.9729329700126,
"count": 64399,
"self": 1.5376919379555147,
"children": {
"env_step": {
"total": 1705.4641717849822,
"count": 64399,
"self": 1566.3374322869417,
"children": {
"SubprocessEnvManager._take_step": {
"total": 138.24776781801552,
"count": 64399,
"self": 5.029337689989688,
"children": {
"TorchPolicy.evaluate": {
"total": 133.21843012802583,
"count": 62553,
"self": 133.21843012802583
}
}
},
"workers": {
"total": 0.8789716800249607,
"count": 64399,
"self": 0.0,
"children": {
"worker_root": {
"total": 2342.963256786994,
"count": 64399,
"is_parallel": true,
"self": 901.8530679970054,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005564585999991323,
"count": 1,
"is_parallel": true,
"self": 0.003567194999845924,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019973910001453987,
"count": 8,
"is_parallel": true,
"self": 0.0019973910001453987
}
}
},
"UnityEnvironment.step": {
"total": 0.05456500899998673,
"count": 1,
"is_parallel": true,
"self": 0.0006460450000531637,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005211519999761549,
"count": 1,
"is_parallel": true,
"self": 0.0005211519999761549
},
"communicator.exchange": {
"total": 0.05154936399998178,
"count": 1,
"is_parallel": true,
"self": 0.05154936399998178
},
"steps_from_proto": {
"total": 0.0018484479999756331,
"count": 1,
"is_parallel": true,
"self": 0.00041580499998872256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014326429999869106,
"count": 8,
"is_parallel": true,
"self": 0.0014326429999869106
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1441.1101887899886,
"count": 64398,
"is_parallel": true,
"self": 36.31624070894054,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.8687088220276,
"count": 64398,
"is_parallel": true,
"self": 25.8687088220276
},
"communicator.exchange": {
"total": 1274.1377465600126,
"count": 64398,
"is_parallel": true,
"self": 1274.1377465600126
},
"steps_from_proto": {
"total": 104.78749269900771,
"count": 64398,
"is_parallel": true,
"self": 21.54303588214816,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.24445681685955,
"count": 515184,
"is_parallel": true,
"self": 83.24445681685955
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 635.9710692470746,
"count": 64399,
"self": 2.7693037440922126,
"children": {
"process_trajectory": {
"total": 132.00520715498078,
"count": 64399,
"self": 131.8024335529808,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2027736019999793,
"count": 2,
"self": 0.2027736019999793
}
}
},
"_update_policy": {
"total": 501.19655834800164,
"count": 454,
"self": 293.0172479730079,
"children": {
"TorchPPOOptimizer.update": {
"total": 208.17931037499375,
"count": 22833,
"self": 208.17931037499375
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.790002539171837e-07,
"count": 1,
"self": 8.790002539171837e-07
},
"TrainerController._save_models": {
"total": 0.09721977399976822,
"count": 1,
"self": 0.0015638879999642086,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09565588599980401,
"count": 1,
"self": 0.09565588599980401
}
}
}
}
}
}
}