{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5488837361335754,
"min": 0.5337144136428833,
"max": 1.474669098854065,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16536.76953125,
"min": 15934.578125,
"max": 44735.5625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989905.0,
"min": 29952.0,
"max": 989905.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989905.0,
"min": 29952.0,
"max": 989905.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5010565519332886,
"min": -0.10021898150444031,
"max": 0.5225381255149841,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 136.78843688964844,
"min": -24.252992630004883,
"max": 142.79653930664062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.012047871015965939,
"min": -0.012047871015965939,
"max": 0.22089549899101257,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.2890686988830566,
"min": -3.2890686988830566,
"max": 53.45671081542969,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06884061784995621,
"min": 0.06175746645091014,
"max": 0.07310531354977727,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9637686498993869,
"min": 0.5040401198142529,
"max": 1.0558351363094214,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013363974388846633,
"min": 0.00025186436255217375,
"max": 0.016111914801151435,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18709564144385285,
"min": 0.0032742367131782584,
"max": 0.2255668072161201,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.80704025482857e-06,
"min": 7.80704025482857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010929856356759999,
"min": 0.00010929856356759999,
"max": 0.0036339427886858005,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1026023142857143,
"min": 0.1026023142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4364324000000002,
"min": 1.3886848,
"max": 2.6113142000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026997119714285714,
"min": 0.00026997119714285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037795967599999996,
"min": 0.0037795967599999996,
"max": 0.12115028857999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007526513189077377,
"min": 0.007526513189077377,
"max": 0.3377963602542877,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10537118464708328,
"min": 0.10537118464708328,
"max": 2.364574432373047,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 382.7792207792208,
"min": 354.0952380952381,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29474.0,
"min": 15984.0,
"max": 32965.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5132882907599599,
"min": -1.0000000521540642,
"max": 1.5982761679305917,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 116.5231983885169,
"min": -30.298601619899273,
"max": 134.2551981061697,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5132882907599599,
"min": -1.0000000521540642,
"max": 1.5982761679305917,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 116.5231983885169,
"min": -30.298601619899273,
"max": 134.2551981061697,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029588947844868842,
"min": 0.028872005529046875,
"max": 6.725692554377019,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.278348984054901,
"min": 2.278348984054901,
"max": 107.61108087003231,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673552425",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673554515"
},
"total": 2089.981602036,
"count": 1,
"self": 0.4956146790004823,
"children": {
"run_training.setup": {
"total": 0.11550448499974664,
"count": 1,
"self": 0.11550448499974664
},
"TrainerController.start_learning": {
"total": 2089.3704828719997,
"count": 1,
"self": 1.2128892488858583,
"children": {
"TrainerController._reset_env": {
"total": 6.407020196999838,
"count": 1,
"self": 6.407020196999838
},
"TrainerController.advance": {
"total": 2081.6621458921127,
"count": 63744,
"self": 1.258059208189934,
"children": {
"env_step": {
"total": 1407.0128187879845,
"count": 63744,
"self": 1303.6920006480445,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.55684481595699,
"count": 63744,
"self": 4.238790038999923,
"children": {
"TorchPolicy.evaluate": {
"total": 98.31805477695707,
"count": 62554,
"self": 33.229381515961904,
"children": {
"TorchPolicy.sample_actions": {
"total": 65.08867326099517,
"count": 62554,
"self": 65.08867326099517
}
}
}
}
},
"workers": {
"total": 0.7639733239830093,
"count": 63744,
"self": 0.0,
"children": {
"worker_root": {
"total": 2084.6728612269117,
"count": 63744,
"is_parallel": true,
"self": 879.2828317009453,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002079570000205422,
"count": 1,
"is_parallel": true,
"self": 0.0007011020006757462,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013784679995296756,
"count": 8,
"is_parallel": true,
"self": 0.0013784679995296756
}
}
},
"UnityEnvironment.step": {
"total": 0.048759442000118725,
"count": 1,
"is_parallel": true,
"self": 0.0005395960001806088,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004822930000045744,
"count": 1,
"is_parallel": true,
"self": 0.0004822930000045744
},
"communicator.exchange": {
"total": 0.045895489000031375,
"count": 1,
"is_parallel": true,
"self": 0.045895489000031375
},
"steps_from_proto": {
"total": 0.001842063999902166,
"count": 1,
"is_parallel": true,
"self": 0.00044758199919670005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001394482000705466,
"count": 8,
"is_parallel": true,
"self": 0.001394482000705466
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1205.3900295259664,
"count": 63743,
"is_parallel": true,
"self": 28.2589688258372,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.2966975459899,
"count": 63743,
"is_parallel": true,
"self": 22.2966975459899
},
"communicator.exchange": {
"total": 1053.4526808760947,
"count": 63743,
"is_parallel": true,
"self": 1053.4526808760947
},
"steps_from_proto": {
"total": 101.38168227804454,
"count": 63743,
"is_parallel": true,
"self": 22.153585543940608,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.22809673410393,
"count": 509944,
"is_parallel": true,
"self": 79.22809673410393
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 673.3912678959382,
"count": 63744,
"self": 2.326466235982025,
"children": {
"process_trajectory": {
"total": 146.0789317669778,
"count": 63744,
"self": 145.75749760697818,
"children": {
"RLTrainer._checkpoint": {
"total": 0.321434159999626,
"count": 2,
"self": 0.321434159999626
}
}
},
"_update_policy": {
"total": 524.9858698929784,
"count": 453,
"self": 203.30218520805056,
"children": {
"TorchPPOOptimizer.update": {
"total": 321.6836846849278,
"count": 22746,
"self": 321.6836846849278
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0110006769536994e-06,
"count": 1,
"self": 1.0110006769536994e-06
},
"TrainerController._save_models": {
"total": 0.08842652300063492,
"count": 1,
"self": 0.0013839050006936304,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08704261799994129,
"count": 1,
"self": 0.08704261799994129
}
}
}
}
}
}
}