{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.20191000401973724,
"min": 0.20191000401973724,
"max": 1.4101150035858154,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 6076.68359375,
"min": 6076.68359375,
"max": 42777.25,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5427449345588684,
"min": -0.133658304810524,
"max": 0.6147935390472412,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 150.34034729003906,
"min": -31.677017211914062,
"max": 172.75698852539062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0015564369969069958,
"min": -0.005906177684664726,
"max": 0.5428589582443237,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.43113306164741516,
"min": -1.5710432529449463,
"max": 128.65757751464844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06707062691266936,
"min": 0.0664865027796751,
"max": 0.07385363970877072,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.938988776777371,
"min": 0.48478579704884583,
"max": 1.0360435751693633,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014808768251901349,
"min": 0.0008708799850595067,
"max": 0.017246510391123588,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20732275552661888,
"min": 0.007335777905698434,
"max": 0.25869765586685384,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.588726041885716e-06,
"min": 7.588726041885716e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010624216458640003,
"min": 0.00010624216458640003,
"max": 0.0033773725742092,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252954285714286,
"min": 0.10252954285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354136,
"min": 1.3691136000000002,
"max": 2.4860288,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026270133142857144,
"min": 0.00026270133142857144,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036778186400000003,
"min": 0.0036778186400000003,
"max": 0.11259650091999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01376793347299099,
"min": 0.01376793347299099,
"max": 0.6629025340080261,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19275106489658356,
"min": 0.19275106489658356,
"max": 4.640317916870117,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 342.56976744186045,
"min": 301.2315789473684,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29461.0,
"min": 15984.0,
"max": 31996.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5178650963271774,
"min": -1.0000000521540642,
"max": 1.63559155722982,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 130.53639828413725,
"min": -32.000001668930054,
"max": 155.3811979368329,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5178650963271774,
"min": -1.0000000521540642,
"max": 1.63559155722982,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 130.53639828413725,
"min": -32.000001668930054,
"max": 155.3811979368329,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.048491860370335794,
"min": 0.04747070257006654,
"max": 13.07496139779687,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.170299991848879,
"min": 4.170299991848879,
"max": 209.1993823647499,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687905151",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687907420"
},
"total": 2269.1066641720004,
"count": 1,
"self": 0.5760810200004016,
"children": {
"run_training.setup": {
"total": 0.07057941200037021,
"count": 1,
"self": 0.07057941200037021
},
"TrainerController.start_learning": {
"total": 2268.4600037399996,
"count": 1,
"self": 1.3816151739961242,
"children": {
"TrainerController._reset_env": {
"total": 4.749067709999963,
"count": 1,
"self": 4.749067709999963
},
"TrainerController.advance": {
"total": 2262.236313362004,
"count": 64032,
"self": 1.430788822901377,
"children": {
"env_step": {
"total": 1623.1682355680623,
"count": 64032,
"self": 1511.4908893400302,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.87431829598563,
"count": 64032,
"self": 4.761106101084806,
"children": {
"TorchPolicy.evaluate": {
"total": 106.11321219490083,
"count": 62558,
"self": 106.11321219490083
}
}
},
"workers": {
"total": 0.8030279320464615,
"count": 64032,
"self": 0.0,
"children": {
"worker_root": {
"total": 2263.340163677026,
"count": 64032,
"is_parallel": true,
"self": 866.1422354140518,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003528227000060724,
"count": 1,
"is_parallel": true,
"self": 0.0015520129995820753,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001976214000478649,
"count": 8,
"is_parallel": true,
"self": 0.001976214000478649
}
}
},
"UnityEnvironment.step": {
"total": 0.04939793699986694,
"count": 1,
"is_parallel": true,
"self": 0.0005373210001380357,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005044259996793699,
"count": 1,
"is_parallel": true,
"self": 0.0005044259996793699
},
"communicator.exchange": {
"total": 0.04658426500009227,
"count": 1,
"is_parallel": true,
"self": 0.04658426500009227
},
"steps_from_proto": {
"total": 0.001771924999957264,
"count": 1,
"is_parallel": true,
"self": 0.00034567100010463037,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014262539998526336,
"count": 8,
"is_parallel": true,
"self": 0.0014262539998526336
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1397.197928262974,
"count": 64031,
"is_parallel": true,
"self": 33.10495772901322,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.40305631294632,
"count": 64031,
"is_parallel": true,
"self": 22.40305631294632
},
"communicator.exchange": {
"total": 1239.4977482450386,
"count": 64031,
"is_parallel": true,
"self": 1239.4977482450386
},
"steps_from_proto": {
"total": 102.19216597597597,
"count": 64031,
"is_parallel": true,
"self": 20.203672994013232,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.98849298196274,
"count": 512248,
"is_parallel": true,
"self": 81.98849298196274
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 637.6372889710401,
"count": 64032,
"self": 2.520096737950098,
"children": {
"process_trajectory": {
"total": 109.53501341308447,
"count": 64032,
"self": 109.3282901920852,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20672322099926532,
"count": 2,
"self": 0.20672322099926532
}
}
},
"_update_policy": {
"total": 525.5821788200055,
"count": 447,
"self": 335.78254726205296,
"children": {
"TorchPPOOptimizer.update": {
"total": 189.79963155795258,
"count": 22818,
"self": 189.79963155795258
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.799996405490674e-07,
"count": 1,
"self": 9.799996405490674e-07
},
"TrainerController._save_models": {
"total": 0.09300651400008064,
"count": 1,
"self": 0.0015150319995882455,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0914914820004924,
"count": 1,
"self": 0.0914914820004924
}
}
}
}
}
}
}