{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3886653780937195,
"min": 0.3886653780937195,
"max": 1.48847496509552,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11821.646484375,
"min": 11821.646484375,
"max": 45154.375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989987.0,
"min": 29952.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989987.0,
"min": 29952.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5604120492935181,
"min": -0.27312779426574707,
"max": 0.6007319688796997,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 156.91537475585938,
"min": -64.73128509521484,
"max": 168.2049560546875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022871237248182297,
"min": -0.03366792947053909,
"max": 0.19346365332603455,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.403946399688721,
"min": -9.292348861694336,
"max": 46.62474060058594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06925965592520134,
"min": 0.0660663541258102,
"max": 0.07321228132267214,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9696351829528187,
"min": 0.5075589953294497,
"max": 1.0663296644052023,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01610296498672271,
"min": 0.0021425522801248797,
"max": 0.017292298195700693,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22544150981411798,
"min": 0.02731902868918487,
"max": 0.24209217473980968,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.538518915764291e-06,
"min": 7.538518915764291e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010553926482070008,
"min": 0.00010553926482070008,
"max": 0.0035087876304041995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251280714285715,
"min": 0.10251280714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351793000000002,
"min": 1.3886848,
"max": 2.617957,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026102943357142873,
"min": 0.00026102943357142873,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003654412070000002,
"min": 0.003654412070000002,
"max": 0.11698262041999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009437923319637775,
"min": 0.008843580260872841,
"max": 0.3118534982204437,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1321309208869934,
"min": 0.1300508975982666,
"max": 2.1829745769500732,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 343.82105263157894,
"min": 298.3030303030303,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32663.0,
"min": 15984.0,
"max": 33292.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5496999803692737,
"min": -1.0000000521540642,
"max": 1.661278772685263,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.67179815471172,
"min": -27.3230018094182,
"max": 164.46659849584103,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5496999803692737,
"min": -1.0000000521540642,
"max": 1.661278772685263,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.67179815471172,
"min": -27.3230018094182,
"max": 164.46659849584103,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03318444501441108,
"min": 0.028758911434749162,
"max": 6.161439008079469,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.119337831354642,
"min": 2.8140907826600596,
"max": 98.58302412927151,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711105616",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711107903"
},
"total": 2287.444130099,
"count": 1,
"self": 0.526343903000452,
"children": {
"run_training.setup": {
"total": 0.08420583099996293,
"count": 1,
"self": 0.08420583099996293
},
"TrainerController.start_learning": {
"total": 2286.8335803649998,
"count": 1,
"self": 1.4539740188997712,
"children": {
"TrainerController._reset_env": {
"total": 2.337443857999915,
"count": 1,
"self": 2.337443857999915
},
"TrainerController.advance": {
"total": 2282.955220387101,
"count": 63985,
"self": 1.446988678141679,
"children": {
"env_step": {
"total": 1631.9518841929905,
"count": 63985,
"self": 1496.2179924859836,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.8574191769685,
"count": 63985,
"self": 4.807125874956455,
"children": {
"TorchPolicy.evaluate": {
"total": 130.05029330201205,
"count": 62551,
"self": 130.05029330201205
}
}
},
"workers": {
"total": 0.8764725300384271,
"count": 63985,
"self": 0.0,
"children": {
"worker_root": {
"total": 2281.666616679987,
"count": 63985,
"is_parallel": true,
"self": 909.0536149240004,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023957529999734106,
"count": 1,
"is_parallel": true,
"self": 0.0006842439997853944,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017115090001880162,
"count": 8,
"is_parallel": true,
"self": 0.0017115090001880162
}
}
},
"UnityEnvironment.step": {
"total": 0.04806105999978172,
"count": 1,
"is_parallel": true,
"self": 0.0006680279998363403,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005008379998798773,
"count": 1,
"is_parallel": true,
"self": 0.0005008379998798773
},
"communicator.exchange": {
"total": 0.04517311600011453,
"count": 1,
"is_parallel": true,
"self": 0.04517311600011453
},
"steps_from_proto": {
"total": 0.0017190779999509687,
"count": 1,
"is_parallel": true,
"self": 0.00035946899993177794,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013596090000191907,
"count": 8,
"is_parallel": true,
"self": 0.0013596090000191907
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1372.6130017559865,
"count": 63984,
"is_parallel": true,
"self": 37.300765591091476,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.182592151006475,
"count": 63984,
"is_parallel": true,
"self": 25.182592151006475
},
"communicator.exchange": {
"total": 1205.0046438919512,
"count": 63984,
"is_parallel": true,
"self": 1205.0046438919512
},
"steps_from_proto": {
"total": 105.12500012193732,
"count": 63984,
"is_parallel": true,
"self": 21.260655753850187,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.86434436808713,
"count": 511872,
"is_parallel": true,
"self": 83.86434436808713
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 649.5563475159688,
"count": 63985,
"self": 2.7685868209907767,
"children": {
"process_trajectory": {
"total": 130.115908119981,
"count": 63985,
"self": 129.90787601798024,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20803210200074318,
"count": 2,
"self": 0.20803210200074318
}
}
},
"_update_policy": {
"total": 516.671852574997,
"count": 455,
"self": 303.46967882100444,
"children": {
"TorchPPOOptimizer.update": {
"total": 213.2021737539926,
"count": 22794,
"self": 213.2021737539926
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.429995770915411e-07,
"count": 1,
"self": 9.429995770915411e-07
},
"TrainerController._save_models": {
"total": 0.08694115799971769,
"count": 1,
"self": 0.001489010999648599,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08545214700006909,
"count": 1,
"self": 0.08545214700006909
}
}
}
}
}
}
}