ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2831450402736664,
"min": 0.27647390961647034,
"max": 1.3314456939697266,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8458.1083984375,
"min": 8263.251953125,
"max": 40390.73828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5331242084503174,
"min": -0.08161784708499908,
"max": 0.6086713671684265,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 145.00978088378906,
"min": -19.66990089416504,
"max": 171.64532470703125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013147570192813873,
"min": 0.008359591476619244,
"max": 0.35875093936920166,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.576138973236084,
"min": 2.232010841369629,
"max": 86.10022735595703,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07074687161920813,
"min": 0.06528083108291947,
"max": 0.07387718798893571,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9904562026689139,
"min": 0.5171403159225499,
"max": 1.0519662533884446,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014774359902297701,
"min": 0.0010657111923892332,
"max": 0.016704808535790972,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20684103863216782,
"min": 0.0127885343086708,
"max": 0.23386731950107362,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.44468323275714e-06,
"min": 7.44468323275714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010422556525859996,
"min": 0.00010422556525859996,
"max": 0.0033299562900145994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248152857142857,
"min": 0.10248152857142857,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347414,
"min": 1.3886848,
"max": 2.4429815,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025790470428571426,
"min": 0.00025790470428571426,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00361066586,
"min": 0.00361066586,
"max": 0.11100754146000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011992672458291054,
"min": 0.011992672458291054,
"max": 0.44280704855918884,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16789741814136505,
"min": 0.16789741814136505,
"max": 3.099649429321289,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 337.632183908046,
"min": 323.7738095238095,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29374.0,
"min": 15984.0,
"max": 33480.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.587590790685566,
"min": -1.0000000521540642,
"max": 1.6524095114852702,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 138.12039878964424,
"min": -30.371001690626144,
"max": 152.8417981788516,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.587590790685566,
"min": -1.0000000521540642,
"max": 1.6524095114852702,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 138.12039878964424,
"min": -30.371001690626144,
"max": 152.8417981788516,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.042656828530250525,
"min": 0.04155728800817467,
"max": 8.673599689267576,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.7111440821317956,
"min": 3.6239896253391635,
"max": 138.7775950282812,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682008481",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682010667"
},
"total": 2185.639243634,
"count": 1,
"self": 0.4746151539998209,
"children": {
"run_training.setup": {
"total": 0.18756779000023016,
"count": 1,
"self": 0.18756779000023016
},
"TrainerController.start_learning": {
"total": 2184.97706069,
"count": 1,
"self": 1.3177440959616433,
"children": {
"TrainerController._reset_env": {
"total": 4.206724615999974,
"count": 1,
"self": 4.206724615999974
},
"TrainerController.advance": {
"total": 2179.3499813390385,
"count": 63986,
"self": 1.4295142691371439,
"children": {
"env_step": {
"total": 1577.1854947989464,
"count": 63986,
"self": 1471.737605235025,
"children": {
"SubprocessEnvManager._take_step": {
"total": 104.63379173691374,
"count": 63986,
"self": 4.7763616609377095,
"children": {
"TorchPolicy.evaluate": {
"total": 99.85743007597603,
"count": 62562,
"self": 99.85743007597603
}
}
},
"workers": {
"total": 0.814097827007572,
"count": 63986,
"self": 0.0,
"children": {
"worker_root": {
"total": 2180.2727685519753,
"count": 63986,
"is_parallel": true,
"self": 817.3340112089627,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027553879999686615,
"count": 1,
"is_parallel": true,
"self": 0.0008322000003317953,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019231879996368662,
"count": 8,
"is_parallel": true,
"self": 0.0019231879996368662
}
}
},
"UnityEnvironment.step": {
"total": 0.0514542790001542,
"count": 1,
"is_parallel": true,
"self": 0.000545565000265924,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000558977999844501,
"count": 1,
"is_parallel": true,
"self": 0.000558977999844501
},
"communicator.exchange": {
"total": 0.048624609999933455,
"count": 1,
"is_parallel": true,
"self": 0.048624609999933455
},
"steps_from_proto": {
"total": 0.001725126000110322,
"count": 1,
"is_parallel": true,
"self": 0.00038822100123070413,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013369049988796178,
"count": 8,
"is_parallel": true,
"self": 0.0013369049988796178
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1362.9387573430126,
"count": 63985,
"is_parallel": true,
"self": 31.63894305388567,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.451805826031432,
"count": 63985,
"is_parallel": true,
"self": 23.451805826031432
},
"communicator.exchange": {
"total": 1212.560337047049,
"count": 63985,
"is_parallel": true,
"self": 1212.560337047049
},
"steps_from_proto": {
"total": 95.28767141604658,
"count": 63985,
"is_parallel": true,
"self": 20.61780366001767,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.6698677560289,
"count": 511880,
"is_parallel": true,
"self": 74.6698677560289
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 600.734972270955,
"count": 63986,
"self": 2.491997850956068,
"children": {
"process_trajectory": {
"total": 104.01509944899863,
"count": 63986,
"self": 103.81317694399877,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20192250499985676,
"count": 2,
"self": 0.20192250499985676
}
}
},
"_update_policy": {
"total": 494.22787497100035,
"count": 448,
"self": 314.92275043594555,
"children": {
"TorchPPOOptimizer.update": {
"total": 179.3051245350548,
"count": 22815,
"self": 179.3051245350548
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.699998943484388e-07,
"count": 1,
"self": 8.699998943484388e-07
},
"TrainerController._save_models": {
"total": 0.10260976900008245,
"count": 1,
"self": 0.001463841999793658,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1011459270002888,
"count": 1,
"self": 0.1011459270002888
}
}
}
}
}
}
}
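
The JSON above is the timer dump that mlagents-learn writes at the end of a run: "gauges" holds per-metric summaries (last value, min, max, sample count) and the nested "children" tree records wall-clock time per internal call site. Below is a minimal sketch, using only Python's standard json module, for loading this file and printing the gauges plus the most expensive timer nodes. The file path is an assumption; point it at wherever this dump lives in your run directory.

import json

# Assumed location of the dump shown above; adjust to your run directory.
with open("run_logs/timers.json") as f:
    root = json.load(f)

# Each gauge stores the last recorded value plus min/max/count over the run.
for name, gauge in sorted(root["gauges"].items()):
    print(f"{name}: value={gauge['value']:.4g} "
          f"min={gauge['min']:.4g} max={gauge['max']:.4g} "
          f"count={gauge['count']}")

def flatten(node, prefix=""):
    # Recursively walk the timer tree, yielding (dotted path, seconds, count).
    for name, child in node.get("children", {}).items():
        path = prefix + name
        yield path, child.get("total", 0.0), child.get("count", 0)
        yield from flatten(child, prefix=path + ".")

# Ten most expensive timer nodes by total wall-clock time; for this run,
# communicator.exchange (the Unity environment round-trip) dominates.
for path, total, count in sorted(flatten(root), key=lambda t: -t[1])[:10]:
    print(f"{total:10.2f}s  x{count:<7d} {path}")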