{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3403018116950989,
"min": 0.33989036083221436,
"max": 1.4787400960922241,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10279.8369140625,
"min": 10153.205078125,
"max": 44859.05859375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989957.0,
"min": 29930.0,
"max": 989957.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989957.0,
"min": 29930.0,
"max": 989957.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6829650402069092,
"min": -0.08016442507505417,
"max": 0.6829650402069092,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 197.37689208984375,
"min": -19.319625854492188,
"max": 197.37689208984375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03257635980844498,
"min": -0.003456464735791087,
"max": 0.30738991498947144,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.414567947387695,
"min": -0.9194196462631226,
"max": 72.85140991210938,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06892577857261717,
"min": 0.06532548673648886,
"max": 0.07271606729011094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9649609000166404,
"min": 0.49590733982943763,
"max": 1.0412436160664118,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017294092819613797,
"min": 0.0012610587729395045,
"max": 0.01856548032570185,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24211729947459318,
"min": 0.013871646502334549,
"max": 0.2599167245598259,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.576190331778572e-06,
"min": 7.576190331778572e-06,
"max": 0.0002952338587315714,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001060666646449,
"min": 0.0001060666646449,
"max": 0.0036353938882020993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252536428571427,
"min": 0.10252536428571427,
"max": 0.1984112857142857,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353550999999998,
"min": 1.388879,
"max": 2.6117979,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026228389214285713,
"min": 0.00026228389214285713,
"max": 0.009841287442857142,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00367197449,
"min": 0.00367197449,
"max": 0.12119861021,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014970860444009304,
"min": 0.014970860444009304,
"max": 0.4294765889644623,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2095920443534851,
"min": 0.2095920443534851,
"max": 3.006336212158203,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 268.42727272727274,
"min": 268.42727272727274,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29527.0,
"min": 16729.0,
"max": 33909.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7315727158026262,
"min": -1.0000000521540642,
"max": 1.7315727158026262,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 190.47299873828888,
"min": -32.000001668930054,
"max": 190.47299873828888,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7315727158026262,
"min": -1.0000000521540642,
"max": 1.7315727158026262,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 190.47299873828888,
"min": -32.000001668930054,
"max": 190.47299873828888,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04155176631545394,
"min": 0.04155176631545394,
"max": 7.885958159671111,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.570694294699933,
"min": 4.570694294699933,
"max": 134.06128871440887,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1667449615",
"python_version": "3.7.15 (default, Oct 12 2022, 19:14:55) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --force --run-id=Pyramids Training 1M --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1667451482"
},
"total": 1866.8401939100002,
"count": 1,
"self": 0.27368371500006106,
"children": {
"run_training.setup": {
"total": 0.04176243599999907,
"count": 1,
"self": 0.04176243599999907
},
"TrainerController.start_learning": {
"total": 1866.524747759,
"count": 1,
"self": 1.4271246920311569,
"children": {
"TrainerController._reset_env": {
"total": 6.197527839000031,
"count": 1,
"self": 6.197527839000031
},
"TrainerController.advance": {
"total": 1858.814133506969,
"count": 64079,
"self": 1.4419256858536755,
"children": {
"env_step": {
"total": 1175.9662867840661,
"count": 64079,
"self": 1073.1034254040305,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.12683821801284,
"count": 64079,
"self": 4.348568938071935,
"children": {
"TorchPolicy.evaluate": {
"total": 97.7782692799409,
"count": 62560,
"self": 32.61998515889093,
"children": {
"TorchPolicy.sample_actions": {
"total": 65.15828412104997,
"count": 62560,
"self": 65.15828412104997
}
}
}
}
},
"workers": {
"total": 0.7360231620226614,
"count": 64079,
"self": 0.0,
"children": {
"worker_root": {
"total": 1864.1272286309968,
"count": 64079,
"is_parallel": true,
"self": 886.7666300019822,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018675359999633656,
"count": 1,
"is_parallel": true,
"self": 0.0007020869996949841,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011654490002683815,
"count": 8,
"is_parallel": true,
"self": 0.0011654490002683815
}
}
},
"UnityEnvironment.step": {
"total": 0.03228973600005247,
"count": 1,
"is_parallel": true,
"self": 0.000304326000104993,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002880199999708566,
"count": 1,
"is_parallel": true,
"self": 0.0002880199999708566
},
"communicator.exchange": {
"total": 0.030732048999993822,
"count": 1,
"is_parallel": true,
"self": 0.030732048999993822
},
"steps_from_proto": {
"total": 0.0009653409999828,
"count": 1,
"is_parallel": true,
"self": 0.00026600900014273066,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006993319998400693,
"count": 8,
"is_parallel": true,
"self": 0.0006993319998400693
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 977.3605986290146,
"count": 64078,
"is_parallel": true,
"self": 21.87021501304298,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 17.59127998603435,
"count": 64078,
"is_parallel": true,
"self": 17.59127998603435
},
"communicator.exchange": {
"total": 863.794905812976,
"count": 64078,
"is_parallel": true,
"self": 863.794905812976
},
"steps_from_proto": {
"total": 74.10419781696123,
"count": 64078,
"is_parallel": true,
"self": 19.304246030052013,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.79995178690922,
"count": 512624,
"is_parallel": true,
"self": 54.79995178690922
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 681.4059210370493,
"count": 64079,
"self": 2.5928343620521446,
"children": {
"process_trajectory": {
"total": 155.36836284099877,
"count": 64079,
"self": 155.18077861699862,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18758422400014751,
"count": 2,
"self": 0.18758422400014751
}
}
},
"_update_policy": {
"total": 523.4447238339984,
"count": 456,
"self": 208.98151164199749,
"children": {
"TorchPPOOptimizer.update": {
"total": 314.4632121920009,
"count": 22782,
"self": 314.4632121920009
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1369997992005665e-06,
"count": 1,
"self": 1.1369997992005665e-06
},
"TrainerController._save_models": {
"total": 0.08596058400007678,
"count": 1,
"self": 0.0015281219998541928,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08443246200022259,
"count": 1,
"self": 0.08443246200022259
}
}
}
}
}
}
}