{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.19533634185791,
"min": 3.1669719219207764,
"max": 3.2957513332366943,
"count": 100
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 68917.015625,
"min": 28434.181640625,
"max": 105464.015625,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 526.6666666666666,
"max": 999.0,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 15628.0,
"max": 26936.0,
"count": 100
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1198.0420076087978,
"min": 1194.2184049994214,
"max": 1201.7095063467,
"count": 64
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2396.0840152175956,
"min": 2390.0967404942567,
"max": 11988.941960091657,
"count": 64
},
"SoccerTwos.Step.mean": {
"value": 999844.0,
"min": 9484.0,
"max": 999844.0,
"count": 100
},
"SoccerTwos.Step.sum": {
"value": 999844.0,
"min": 9484.0,
"max": 999844.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.009531398303806782,
"min": -0.028105400502681732,
"max": 0.01449987106025219,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.09531398117542267,
"min": -0.3372648060321808,
"max": 0.18849831819534302,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.008838149718940258,
"min": -0.029740313068032265,
"max": 0.01381057221442461,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.08838149905204773,
"min": -0.3568837642669678,
"max": 0.1690513789653778,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.5714285714285714,
"max": 0.19809230932822594,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 2.5752000212669373,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.5714285714285714,
"max": 0.19809230932822594,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -8.0,
"max": 2.5752000212669373,
"count": 100
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01588540293741971,
"min": 0.011148569887154736,
"max": 0.021742360728482405,
"count": 46
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01588540293741971,
"min": 0.011148569887154736,
"max": 0.021742360728482405,
"count": 46
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0009573467609394963,
"min": 9.311301473265606e-07,
"max": 0.005382450468217333,
"count": 46
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0009573467609394963,
"min": 9.311301473265606e-07,
"max": 0.005382450468217333,
"count": 46
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0009645330506221701,
"min": 1.2156386522595615e-06,
"max": 0.0055860036906475825,
"count": 46
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0009645330506221701,
"min": 1.2156386522595615e-06,
"max": 0.0055860036906475825,
"count": 46
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 46
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 46
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 46
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 46
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 46
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 46
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678727149",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/poca/SoccerTwos.yaml --env=/content/training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678729238"
},
"total": 2088.815020232,
"count": 1,
"self": 0.4385623659995872,
"children": {
"run_training.setup": {
"total": 0.16163720000008652,
"count": 1,
"self": 0.16163720000008652
},
"TrainerController.start_learning": {
"total": 2088.2148206660004,
"count": 1,
"self": 1.4529292570341568,
"children": {
"TrainerController._reset_env": {
"total": 7.2116242549993785,
"count": 6,
"self": 7.2116242549993785
},
"TrainerController.advance": {
"total": 2079.2593675359662,
"count": 65224,
"self": 1.6912534419216172,
"children": {
"env_step": {
"total": 1659.8995953189947,
"count": 65224,
"self": 1300.745286861065,
"children": {
"SubprocessEnvManager._take_step": {
"total": 358.27748184693655,
"count": 65224,
"self": 10.75518702793238,
"children": {
"TorchPolicy.evaluate": {
"total": 347.5222948190042,
"count": 129560,
"self": 347.5222948190042
}
}
},
"workers": {
"total": 0.876826610993021,
"count": 65224,
"self": 0.0,
"children": {
"worker_root": {
"total": 2083.6253783800025,
"count": 65224,
"is_parallel": true,
"self": 986.7496462200538,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004120793000083722,
"count": 2,
"is_parallel": true,
"self": 0.0010642839999945863,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003056509000089136,
"count": 8,
"is_parallel": true,
"self": 0.003056509000089136
}
}
},
"UnityEnvironment.step": {
"total": 0.037904391999973086,
"count": 1,
"is_parallel": true,
"self": 0.001060361000327248,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0007708050000019284,
"count": 1,
"is_parallel": true,
"self": 0.0007708050000019284
},
"communicator.exchange": {
"total": 0.03285783699993772,
"count": 1,
"is_parallel": true,
"self": 0.03285783699993772
},
"steps_from_proto": {
"total": 0.0032153889997061924,
"count": 2,
"is_parallel": true,
"self": 0.0006746859999111621,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025407029997950303,
"count": 8,
"is_parallel": true,
"self": 0.0025407029997950303
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1096.8658263499492,
"count": 65223,
"is_parallel": true,
"self": 62.820905666929775,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 44.15425392503403,
"count": 65223,
"is_parallel": true,
"self": 44.15425392503403
},
"communicator.exchange": {
"total": 793.7561593879473,
"count": 65223,
"is_parallel": true,
"self": 793.7561593879473
},
"steps_from_proto": {
"total": 196.13450737003814,
"count": 130446,
"is_parallel": true,
"self": 36.081435931882424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 160.0530714381557,
"count": 521784,
"is_parallel": true,
"self": 160.0530714381557
}
}
}
}
},
"steps_from_proto": {
"total": 0.009905809999509074,
"count": 8,
"is_parallel": true,
"self": 0.0018407020006634411,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.008065107998845633,
"count": 32,
"is_parallel": true,
"self": 0.008065107998845633
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 417.66851877504996,
"count": 65224,
"self": 13.551541000070984,
"children": {
"process_trajectory": {
"total": 107.03329781098091,
"count": 65224,
"self": 106.4232389519807,
"children": {
"RLTrainer._checkpoint": {
"total": 0.610058859000219,
"count": 2,
"self": 0.610058859000219
}
}
},
"_update_policy": {
"total": 297.08367996399807,
"count": 46,
"self": 187.63886887799367,
"children": {
"TorchPOCAOptimizer.update": {
"total": 109.44481108600439,
"count": 1380,
"self": 109.44481108600439
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0160001693293452e-06,
"count": 1,
"self": 1.0160001693293452e-06
},
"TrainerController._save_models": {
"total": 0.29089860200019757,
"count": 1,
"self": 0.001919720000387315,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28897888199981026,
"count": 1,
"self": 0.28897888199981026
}
}
}
}
}
}
}