ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.24800357222557068,
"min": 0.2137782722711563,
"max": 1.4664843082427979,
"count": 66
},
"Pyramids.Policy.Entropy.sum": {
"value": 7416.298828125,
"min": 6454.3935546875,
"max": 44487.26953125,
"count": 66
},
"Pyramids.Step.mean": {
"value": 1979960.0,
"min": 29952.0,
"max": 1979960.0,
"count": 66
},
"Pyramids.Step.sum": {
"value": 1979960.0,
"min": 29952.0,
"max": 1979960.0,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7750120162963867,
"min": -0.09869357943534851,
"max": 0.8393959999084473,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 227.07852172851562,
"min": -23.587764739990234,
"max": 255.00885009765625,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0029208590276539326,
"min": -0.028419921174645424,
"max": 0.40528467297554016,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.8558117151260376,
"min": -7.957577705383301,
"max": 96.0524673461914,
"count": 66
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07155785263780634,
"min": 0.06401901965817706,
"max": 0.07438902306105975,
"count": 66
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0018099369292888,
"min": 0.5083555090211027,
"max": 1.0637672329253025,
"count": 66
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013265262272902529,
"min": 0.00010484618660803808,
"max": 0.017355979519336307,
"count": 66
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1857136718206354,
"min": 0.0013630004259044951,
"max": 0.25093374454302775,
"count": 66
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.28389109587857e-06,
"min": 5.28389109587857e-06,
"max": 0.0002975753150939428,
"count": 66
},
"Pyramids.Policy.LearningRate.sum": {
"value": 7.397447534229998e-05,
"min": 7.397447534229998e-05,
"max": 0.00363756683747775,
"count": 66
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10176126428571429,
"min": 0.10176126428571429,
"max": 0.19919177142857142,
"count": 66
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4246577,
"min": 1.3845568000000001,
"max": 2.6001557000000006,
"count": 66
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00018595030214285714,
"min": 0.00018595030214285714,
"max": 0.009919257965714285,
"count": 66
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00260330423,
"min": 0.00260330423,
"max": 0.12126097277500002,
"count": 66
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007138731889426708,
"min": 0.0066531808115541935,
"max": 0.4042229652404785,
"count": 66
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09994224458932877,
"min": 0.09314452856779099,
"max": 2.8295607566833496,
"count": 66
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 236.575,
"min": 216.13669064748203,
"max": 999.0,
"count": 66
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28389.0,
"min": 15984.0,
"max": 32945.0,
"count": 66
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7478561864411535,
"min": -1.0000000521540642,
"max": 1.772834574705676,
"count": 66
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 211.49059855937958,
"min": -32.000001668930054,
"max": 245.95659844577312,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7478561864411535,
"min": -1.0000000521540642,
"max": 1.772834574705676,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 211.49059855937958,
"min": -32.000001668930054,
"max": 245.95659844577312,
"count": 66
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.017675159525868005,
"min": 0.016706443338698036,
"max": 9.123851827345788,
"count": 66
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.1386943026300287,
"min": 2.062561700882725,
"max": 145.98162923753262,
"count": 66
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707067574",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 3 --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707072245"
},
"total": 4671.053423575,
"count": 1,
"self": 0.42601222099983715,
"children": {
"run_training.setup": {
"total": 0.045467555999493925,
"count": 1,
"self": 0.045467555999493925
},
"TrainerController.start_learning": {
"total": 4670.581943798001,
"count": 1,
"self": 2.7136775419048718,
"children": {
"TrainerController._reset_env": {
"total": 2.26388002099975,
"count": 1,
"self": 2.26388002099975
},
"TrainerController.advance": {
"total": 4665.5103417900955,
"count": 129516,
"self": 3.0415786475441564,
"children": {
"env_step": {
"total": 3423.667683731792,
"count": 129516,
"self": 3159.7675410407937,
"children": {
"SubprocessEnvManager._take_step": {
"total": 262.16158983772493,
"count": 129516,
"self": 9.592200320927986,
"children": {
"TorchPolicy.evaluate": {
"total": 252.56938951679695,
"count": 125067,
"self": 252.56938951679695
}
}
},
"workers": {
"total": 1.7385528532731769,
"count": 129516,
"self": 0.0,
"children": {
"worker_root": {
"total": 4659.887258529914,
"count": 129516,
"is_parallel": true,
"self": 1737.8123599386809,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020462779993977165,
"count": 1,
"is_parallel": true,
"self": 0.000630208000075072,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014160699993226444,
"count": 8,
"is_parallel": true,
"self": 0.0014160699993226444
}
}
},
"UnityEnvironment.step": {
"total": 0.05177360400011821,
"count": 1,
"is_parallel": true,
"self": 0.0006036209988451446,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005070510005680262,
"count": 1,
"is_parallel": true,
"self": 0.0005070510005680262
},
"communicator.exchange": {
"total": 0.04902255900015007,
"count": 1,
"is_parallel": true,
"self": 0.04902255900015007
},
"steps_from_proto": {
"total": 0.0016403730005549733,
"count": 1,
"is_parallel": true,
"self": 0.00032830899999680696,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013120640005581663,
"count": 8,
"is_parallel": true,
"self": 0.0013120640005581663
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2922.0748985912333,
"count": 129515,
"is_parallel": true,
"self": 70.0589943893192,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 51.7430342850148,
"count": 129515,
"is_parallel": true,
"self": 51.7430342850148
},
"communicator.exchange": {
"total": 2595.7864470538952,
"count": 129515,
"is_parallel": true,
"self": 2595.7864470538952
},
"steps_from_proto": {
"total": 204.486422863004,
"count": 129515,
"is_parallel": true,
"self": 41.12764777292796,
"children": {
"_process_rank_one_or_two_observation": {
"total": 163.35877509007605,
"count": 1036120,
"is_parallel": true,
"self": 163.35877509007605
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1238.8010794107595,
"count": 129516,
"self": 5.311919556172143,
"children": {
"process_trajectory": {
"total": 256.67963393858463,
"count": 129516,
"self": 256.2886781425832,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3909557960014354,
"count": 4,
"self": 0.3909557960014354
}
}
},
"_update_policy": {
"total": 976.8095259160027,
"count": 915,
"self": 569.3073360291091,
"children": {
"TorchPPOOptimizer.update": {
"total": 407.5021898868936,
"count": 45636,
"self": 407.5021898868936
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.229000190505758e-06,
"count": 1,
"self": 1.229000190505758e-06
},
"TrainerController._save_models": {
"total": 0.09404321600050025,
"count": 1,
"self": 0.0014257790007832227,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09261743699971703,
"count": 1,
"self": 0.09261743699971703
}
}
}
}
}
}
}
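
A minimal sketch (not part of the original log) of how the gauges and metadata recorded above could be read back, assuming the file is saved at run_logs/timers.json as the path at the top suggests; it uses only Python's standard json module and keys that appear in the data.

import json

# Load the ML-Agents timer/gauge dump produced by this training run.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

gauges = timers["gauges"]
meta = timers["metadata"]

# Mean episodic reward gauge: "value" is the latest reading, "min"/"max" span the run.
reward = gauges["Pyramids.Environment.CumulativeReward.mean"]
print(f"cumulative reward: last={reward['value']:.3f}, max={reward['max']:.3f}")

# Environment steps and wall-clock time (metadata stores epoch seconds as strings).
steps = gauges["Pyramids.Step.mean"]["value"]
wall_clock = int(meta["end_time_seconds"]) - int(meta["start_time_seconds"])
print(f"steps: {steps:.0f}, wall clock: {wall_clock} s, timer total: {timers['total']:.1f} s")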