{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.36411893367767334,
"min": 0.347688764333725,
"max": 1.434644103050232,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10818.7021484375,
"min": 10441.7890625,
"max": 43521.36328125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989970.0,
"min": 29952.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989970.0,
"min": 29952.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6657077074050903,
"min": -0.07704223692417145,
"max": 0.6657077074050903,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 192.3895263671875,
"min": -18.490137100219727,
"max": 192.3895263671875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.061358481645584106,
"min": -0.004013091325759888,
"max": 0.1963900625705719,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 17.732601165771484,
"min": -1.083534598350525,
"max": 47.33000564575195,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0680326780627435,
"min": 0.06570963778780302,
"max": 0.07235078659141436,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0204901709411525,
"min": 0.4995702086919758,
"max": 1.0852617988712154,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016334848307491094,
"min": 0.001317103557566168,
"max": 0.016554153625163184,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24502272461236643,
"min": 0.011689779240457249,
"max": 0.24502272461236643,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.507477497540004e-06,
"min": 7.507477497540004e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011261216246310006,
"min": 0.00011261216246310006,
"max": 0.0036358201880599995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250246,
"min": 0.10250246,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375369,
"min": 1.3886848,
"max": 2.6119399999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002599957540000002,
"min": 0.0002599957540000002,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038999363100000028,
"min": 0.0038999363100000028,
"max": 0.12121280599999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009498043917119503,
"min": 0.009498043917119503,
"max": 0.37360522150993347,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14247065782546997,
"min": 0.13809536397457123,
"max": 2.615236520767212,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 275.60377358490564,
"min": 275.60377358490564,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29214.0,
"min": 15984.0,
"max": 34274.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6713028448678198,
"min": -1.0000000521540642,
"max": 1.6900277024153436,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 175.48679871112108,
"min": -30.534001648426056,
"max": 175.48679871112108,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6713028448678198,
"min": -1.0000000521540642,
"max": 1.6900277024153436,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 175.48679871112108,
"min": -30.534001648426056,
"max": 175.48679871112108,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.026794836364154306,
"min": 0.026794836364154306,
"max": 7.6457915138453245,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.813457818236202,
"min": 2.813457818236202,
"max": 122.33266422152519,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695402189",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695404575"
},
"total": 2385.3036532669994,
"count": 1,
"self": 0.5815064260004874,
"children": {
"run_training.setup": {
"total": 0.03940594199957559,
"count": 1,
"self": 0.03940594199957559
},
"TrainerController.start_learning": {
"total": 2384.6827408989993,
"count": 1,
"self": 1.4771219160647888,
"children": {
"TrainerController._reset_env": {
"total": 3.9737865759998385,
"count": 1,
"self": 3.9737865759998385
},
"TrainerController.advance": {
"total": 2379.131911624934,
"count": 64166,
"self": 1.5101090560656303,
"children": {
"env_step": {
"total": 1708.088338063846,
"count": 64166,
"self": 1591.8568740950595,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.3685499618241,
"count": 64166,
"self": 5.039768438806277,
"children": {
"TorchPolicy.evaluate": {
"total": 110.32878152301782,
"count": 62566,
"self": 110.32878152301782
}
}
},
"workers": {
"total": 0.8629140069624555,
"count": 64166,
"self": 0.0,
"children": {
"worker_root": {
"total": 2378.985732706218,
"count": 64166,
"is_parallel": true,
"self": 910.4156223261634,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002028414999585948,
"count": 1,
"is_parallel": true,
"self": 0.0006310759999905713,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013973389995953767,
"count": 8,
"is_parallel": true,
"self": 0.0013973389995953767
}
}
},
"UnityEnvironment.step": {
"total": 0.05440728299981856,
"count": 1,
"is_parallel": true,
"self": 0.0005948779999016551,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000477920999401249,
"count": 1,
"is_parallel": true,
"self": 0.000477920999401249
},
"communicator.exchange": {
"total": 0.05029380599989963,
"count": 1,
"is_parallel": true,
"self": 0.05029380599989963
},
"steps_from_proto": {
"total": 0.003040678000616026,
"count": 1,
"is_parallel": true,
"self": 0.0004284819988242816,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026121960017917445,
"count": 8,
"is_parallel": true,
"self": 0.0026121960017917445
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1468.5701103800548,
"count": 64165,
"is_parallel": true,
"self": 34.89470621586588,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.23191262713226,
"count": 64165,
"is_parallel": true,
"self": 24.23191262713226
},
"communicator.exchange": {
"total": 1296.3889458991998,
"count": 64165,
"is_parallel": true,
"self": 1296.3889458991998
},
"steps_from_proto": {
"total": 113.05454563785679,
"count": 64165,
"is_parallel": true,
"self": 22.421051619830905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 90.63349401802589,
"count": 513320,
"is_parallel": true,
"self": 90.63349401802589
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 669.5334645050225,
"count": 64166,
"self": 2.8007377343710687,
"children": {
"process_trajectory": {
"total": 119.32672937566349,
"count": 64166,
"self": 119.10598056366325,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2207488120002381,
"count": 2,
"self": 0.2207488120002381
}
}
},
"_update_policy": {
"total": 547.4059973949879,
"count": 454,
"self": 356.3122683948932,
"children": {
"TorchPPOOptimizer.update": {
"total": 191.09372900009475,
"count": 22812,
"self": 191.09372900009475
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.440009307581931e-07,
"count": 1,
"self": 9.440009307581931e-07
},
"TrainerController._save_models": {
"total": 0.09991983799955051,
"count": 1,
"self": 0.001401557999997749,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09851827999955276,
"count": 1,
"self": 0.09851827999955276
}
}
}
}
}
}
}