{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6819160580635071,
"min": 0.6629638075828552,
"max": 1.51837158203125,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20173.8046875,
"min": 19915.412109375,
"max": 46061.3203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989913.0,
"min": 29952.0,
"max": 989913.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989913.0,
"min": 29952.0,
"max": 989913.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.15569959580898285,
"min": -0.11837504804134369,
"max": 0.17404457926750183,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 39.236297607421875,
"min": -28.52838706970215,
"max": 43.68518829345703,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007443465758115053,
"min": 0.007443465758115053,
"max": 0.2775042653083801,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.875753402709961,
"min": 1.875753402709961,
"max": 65.76850891113281,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06699290539206744,
"min": 0.06408625017045814,
"max": 0.07272980823710957,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9379006754889441,
"min": 0.4728908034181356,
"max": 1.0771633558761098,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007444984943315574,
"min": 6.690513093737361e-05,
"max": 0.009175153382229422,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10422978920641804,
"min": 0.0009366718331232306,
"max": 0.13762730073344132,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.435383235857145e-06,
"min": 7.435383235857145e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010409536530200003,
"min": 0.00010409536530200003,
"max": 0.0030236870921044002,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247842857142857,
"min": 0.10247842857142857,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.434698,
"min": 1.3691136000000002,
"max": 2.3173566,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002575950142857143,
"min": 0.0002575950142857143,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036063302000000005,
"min": 0.0036063302000000005,
"max": 0.10081877043999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012148250825703144,
"min": 0.012148250825703144,
"max": 0.34503912925720215,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17007550597190857,
"min": 0.17007550597190857,
"max": 2.415273904800415,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 703.6,
"min": 693.4634146341464,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31662.0,
"min": 15984.0,
"max": 33053.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.5406221856673559,
"min": -1.0000000521540642,
"max": 0.8140399737283588,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 24.327998355031013,
"min": -32.000001668930054,
"max": 32.56159894913435,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.5406221856673559,
"min": -1.0000000521540642,
"max": 0.8140399737283588,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 24.327998355031013,
"min": -32.000001668930054,
"max": 32.56159894913435,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0879827772050501,
"min": 0.0879827772050501,
"max": 6.846857777796686,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9592249742272543,
"min": 3.6500879912055098,
"max": 109.54972444474697,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1732036099",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1732038956"
},
"total": 2856.238191001,
"count": 1,
"self": 0.7398672869994698,
"children": {
"run_training.setup": {
"total": 0.11820819499985191,
"count": 1,
"self": 0.11820819499985191
},
"TrainerController.start_learning": {
"total": 2855.3801155190004,
"count": 1,
"self": 2.2687031999748797,
"children": {
"TrainerController._reset_env": {
"total": 6.362156989999903,
"count": 1,
"self": 6.362156989999903
},
"TrainerController.advance": {
"total": 2846.593039698026,
"count": 63182,
"self": 2.381631643080709,
"children": {
"env_step": {
"total": 1817.9159717169057,
"count": 63182,
"self": 1663.7332222608275,
"children": {
"SubprocessEnvManager._take_step": {
"total": 152.84359284206357,
"count": 63182,
"self": 6.5422145490315415,
"children": {
"TorchPolicy.evaluate": {
"total": 146.30137829303203,
"count": 62556,
"self": 146.30137829303203
}
}
},
"workers": {
"total": 1.3391566140146551,
"count": 63182,
"self": 0.0,
"children": {
"worker_root": {
"total": 2849.0949210699923,
"count": 63182,
"is_parallel": true,
"self": 1352.0353206569912,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024923729999954958,
"count": 1,
"is_parallel": true,
"self": 0.0007344349994582444,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017579380005372514,
"count": 8,
"is_parallel": true,
"self": 0.0017579380005372514
}
}
},
"UnityEnvironment.step": {
"total": 0.06701166799985003,
"count": 1,
"is_parallel": true,
"self": 0.0007330779997118952,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004973550001068361,
"count": 1,
"is_parallel": true,
"self": 0.0004973550001068361
},
"communicator.exchange": {
"total": 0.0637939800001277,
"count": 1,
"is_parallel": true,
"self": 0.0637939800001277
},
"steps_from_proto": {
"total": 0.0019872549999035982,
"count": 1,
"is_parallel": true,
"self": 0.0004332210000939085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015540339998096897,
"count": 8,
"is_parallel": true,
"self": 0.0015540339998096897
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1497.0596004130011,
"count": 63181,
"is_parallel": true,
"self": 47.85649653601081,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 29.995009046025416,
"count": 63181,
"is_parallel": true,
"self": 29.995009046025416
},
"communicator.exchange": {
"total": 1293.9749133059663,
"count": 63181,
"is_parallel": true,
"self": 1293.9749133059663
},
"steps_from_proto": {
"total": 125.2331815249986,
"count": 63181,
"is_parallel": true,
"self": 26.472981750106328,
"children": {
"_process_rank_one_or_two_observation": {
"total": 98.76019977489227,
"count": 505448,
"is_parallel": true,
"self": 98.76019977489227
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1026.2954363380397,
"count": 63182,
"self": 3.881703021027988,
"children": {
"process_trajectory": {
"total": 154.36951624900826,
"count": 63182,
"self": 153.99242839200838,
"children": {
"RLTrainer._checkpoint": {
"total": 0.37708785699987857,
"count": 2,
"self": 0.37708785699987857
}
}
},
"_update_policy": {
"total": 868.0442170680035,
"count": 426,
"self": 363.7552467800549,
"children": {
"TorchPPOOptimizer.update": {
"total": 504.2889702879486,
"count": 22953,
"self": 504.2889702879486
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4019997252034955e-06,
"count": 1,
"self": 1.4019997252034955e-06
},
"TrainerController._save_models": {
"total": 0.15621422899948811,
"count": 1,
"self": 0.0034367259995633503,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15277750299992476,
"count": 1,
"self": 0.15277750299992476
}
}
}
}
}
}
}