{"_comment": "removed non-JSON header residue (repo breadcrumb, avatar caption, commit message/hash) that preceded the document and broke strict parsing"}
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4303474426269531,
"min": 1.1953974962234497,
"max": 3.29569411277771,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 19132.328125,
"min": 16187.94921875,
"max": 143469.59375,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 283.05263157894734,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 16156.0,
"max": 23956.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1204.4411450126954,
"min": 1189.2658214645403,
"max": 1221.1020871596347,
"count": 348
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2408.882290025391,
"min": 2378.5316429290806,
"max": 38542.76393313485,
"count": 348
},
"SoccerTwos.Step.mean": {
"value": 49999574.0,
"min": 9232.0,
"max": 49999574.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999574.0,
"min": 9232.0,
"max": 49999574.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 8.863906987244263e-05,
"min": -0.09684797376394272,
"max": 0.039017047733068466,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.0008863906841725111,
"min": -1.4526851177215576,
"max": 0.39017048478126526,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 8.832582534523681e-05,
"min": -0.09686007350683212,
"max": 0.03836328908801079,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.0008832582389004529,
"min": -1.4529011249542236,
"max": 0.3836328983306885,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.6857384592294693,
"max": 0.30922666390736897,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -17.829199939966202,
"max": 7.01119989156723,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.6857384592294693,
"max": 0.30922666390736897,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -17.829199939966202,
"max": 7.01119989156723,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019303698907606304,
"min": 0.009621734620789841,
"max": 0.025912615560810082,
"count": 2287
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019303698907606304,
"min": 0.009621734620789841,
"max": 0.025912615560810082,
"count": 2287
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 1.3651335052193106e-07,
"min": 7.843297929535642e-12,
"max": 0.017249827086925507,
"count": 2287
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 1.3651335052193106e-07,
"min": 7.843297929535642e-12,
"max": 0.017249827086925507,
"count": 2287
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 1.364581883098644e-07,
"min": 1.3570412126194068e-11,
"max": 0.01751269195228815,
"count": 2287
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 1.364581883098644e-07,
"min": 1.3570412126194068e-11,
"max": 0.01751269195228815,
"count": 2287
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2287
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2287
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 2287
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 2287
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2287
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2287
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709392019",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:40:32) [GCC 12.3.0]",
"command_line_arguments": "/opt/conda/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709509854"
},
"total": 117835.85971496999,
"count": 1,
"self": 0.4010079428553581,
"children": {
"run_training.setup": {
"total": 0.04044067859649658,
"count": 1,
"self": 0.04044067859649658
},
"TrainerController.start_learning": {
"total": 117835.41826634854,
"count": 1,
"self": 49.42197136580944,
"children": {
"TrainerController._reset_env": {
"total": 24.56546761840582,
"count": 249,
"self": 24.56546761840582
},
"TrainerController.advance": {
"total": 117761.21391058713,
"count": 3252957,
"self": 53.66396142542362,
"children": {
"env_step": {
"total": 55676.000531986356,
"count": 3252957,
"self": 43812.915923289955,
"children": {
"SubprocessEnvManager._take_step": {
"total": 11831.879136480391,
"count": 3252957,
"self": 426.82750248908997,
"children": {
"TorchPolicy.evaluate": {
"total": 11405.051633991301,
"count": 6462430,
"self": 11405.051633991301
}
}
},
"workers": {
"total": 31.205472216010094,
"count": 3252957,
"self": 0.0,
"children": {
"worker_root": {
"total": 117734.94591672719,
"count": 3252957,
"is_parallel": true,
"self": 81630.8699169308,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027211830019950867,
"count": 2,
"is_parallel": true,
"self": 0.000684984028339386,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020361989736557007,
"count": 8,
"is_parallel": true,
"self": 0.0020361989736557007
}
}
},
"UnityEnvironment.step": {
"total": 0.02486460655927658,
"count": 1,
"is_parallel": true,
"self": 0.0005846098065376282,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047321617603302,
"count": 1,
"is_parallel": true,
"self": 0.00047321617603302
},
"communicator.exchange": {
"total": 0.021958790719509125,
"count": 1,
"is_parallel": true,
"self": 0.021958790719509125
},
"steps_from_proto": {
"total": 0.0018479898571968079,
"count": 2,
"is_parallel": true,
"self": 0.0003945678472518921,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014534220099449158,
"count": 8,
"is_parallel": true,
"self": 0.0014534220099449158
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 36103.5907208547,
"count": 3252956,
"is_parallel": true,
"self": 1735.802337884903,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1249.075766555965,
"count": 3252956,
"is_parallel": true,
"self": 1249.075766555965
},
"communicator.exchange": {
"total": 27656.65300156176,
"count": 3252956,
"is_parallel": true,
"self": 27656.65300156176
},
"steps_from_proto": {
"total": 5462.059614852071,
"count": 6505912,
"is_parallel": true,
"self": 1043.9579699635506,
"children": {
"_process_rank_one_or_two_observation": {
"total": 4418.10164488852,
"count": 26023648,
"is_parallel": true,
"self": 4418.10164488852
}
}
}
}
},
"steps_from_proto": {
"total": 0.4852789416909218,
"count": 496,
"is_parallel": true,
"self": 0.09975766390562057,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.3855212777853012,
"count": 1984,
"is_parallel": true,
"self": 0.3855212777853012
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 62031.54941717535,
"count": 3252957,
"self": 635.0691465362906,
"children": {
"process_trajectory": {
"total": 6193.857440367341,
"count": 3252957,
"self": 6172.539449818432,
"children": {
"RLTrainer._checkpoint": {
"total": 21.31799054890871,
"count": 100,
"self": 21.31799054890871
}
}
},
"_update_policy": {
"total": 55202.62283027172,
"count": 2287,
"self": 6496.640734091401,
"children": {
"TorchPOCAOptimizer.update": {
"total": 48705.98209618032,
"count": 68613,
"self": 48705.98209618032
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3634562492370605e-06,
"count": 1,
"self": 1.3634562492370605e-06
},
"TrainerController._save_models": {
"total": 0.21691541373729706,
"count": 1,
"self": 0.009875722229480743,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20703969150781631,
"count": 1,
"self": 0.20703969150781631
}
}
}
}
}
}
}