{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5764824151992798,
"min": 1.4329878091812134,
"max": 1.7143844366073608,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32437.703125,
"min": 26598.671875,
"max": 39483.58203125,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 63.723684210526315,
"min": 39.65853658536585,
"max": 83.9298245614035,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19372.0,
"min": 18016.0,
"max": 21160.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1831.6654251467062,
"min": 1665.8446566899242,
"max": 1849.1948130862559,
"count": 1000
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 278413.14462229935,
"min": 200742.21848018424,
"max": 424643.348255108,
"count": 1000
},
"SoccerTwos.Step.mean": {
"value": 19999964.0,
"min": 10009993.0,
"max": 19999964.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 19999964.0,
"min": 10009993.0,
"max": 19999964.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.012385610491037369,
"min": -0.12546078860759735,
"max": 0.10439267754554749,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.8949984312057495,
"min": -23.71208953857422,
"max": 17.642362594604492,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.008823879063129425,
"min": -0.12814757227897644,
"max": 0.10648775100708008,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.3500534296035767,
"min": -24.219890594482422,
"max": 17.996429443359375,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.04934640141094432,
"min": -0.42219999773161754,
"max": 0.2998220690365495,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 7.549999415874481,
"min": -59.10799968242645,
"max": 57.995399475097656,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.04934640141094432,
"min": -0.42219999773161754,
"max": 0.2998220690365495,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 7.549999415874481,
"min": -59.10799968242645,
"max": 57.995399475097656,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01566379334932814,
"min": 0.010678550639810661,
"max": 0.025296956343421093,
"count": 485
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01566379334932814,
"min": 0.010678550639810661,
"max": 0.025296956343421093,
"count": 485
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10293621495366097,
"min": 0.08394738659262657,
"max": 0.12795103788375856,
"count": 485
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10293621495366097,
"min": 0.08394738659262657,
"max": 0.12795103788375856,
"count": 485
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10428817396362623,
"min": 0.08583409736553828,
"max": 0.12964263384540875,
"count": 485
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10428817396362623,
"min": 0.08583409736553828,
"max": 0.12964263384540875,
"count": 485
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 485
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 485
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 485
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 485
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 485
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 485
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675929581",
"python_version": "3.9.16 (main, Jan 11 2023, 10:02:19) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/sasha/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1675964293"
},
"total": 34713.153975744,
"count": 1,
"self": 1.1884499919979135,
"children": {
"run_training.setup": {
"total": 0.024261327999999915,
"count": 1,
"self": 0.024261327999999915
},
"TrainerController.start_learning": {
"total": 34711.941264424,
"count": 1,
"self": 17.211584158030746,
"children": {
"TrainerController._reset_env": {
"total": 2.7759898310025846,
"count": 41,
"self": 2.7759898310025846
},
"TrainerController.advance": {
"total": 34691.753133194965,
"count": 694746,
"self": 17.369019368925365,
"children": {
"env_step": {
"total": 10826.626759303184,
"count": 694746,
"self": 8767.138531000455,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2048.917628187205,
"count": 694746,
"self": 81.44402606552262,
"children": {
"TorchPolicy.evaluate": {
"total": 1967.4736021216825,
"count": 1253658,
"self": 1967.4736021216825
}
}
},
"workers": {
"total": 10.570600115524362,
"count": 694746,
"self": 0.0,
"children": {
"worker_root": {
"total": 34678.73068446067,
"count": 694746,
"is_parallel": true,
"self": 27593.21355565262,
"children": {
"steps_from_proto": {
"total": 0.07436059001540274,
"count": 82,
"is_parallel": true,
"self": 0.01642363302297767,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05793695699242507,
"count": 328,
"is_parallel": true,
"self": 0.05793695699242507
}
}
},
"UnityEnvironment.step": {
"total": 7085.442768218039,
"count": 694746,
"is_parallel": true,
"self": 459.360720204123,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 278.0116277030982,
"count": 694746,
"is_parallel": true,
"self": 278.0116277030982
},
"communicator.exchange": {
"total": 5166.799519090822,
"count": 694746,
"is_parallel": true,
"self": 5166.799519090822
},
"steps_from_proto": {
"total": 1181.270901219996,
"count": 1389492,
"is_parallel": true,
"self": 266.1426242594232,
"children": {
"_process_rank_one_or_two_observation": {
"total": 915.1282769605727,
"count": 5557968,
"is_parallel": true,
"self": 915.1282769605727
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 23847.757354522855,
"count": 694746,
"self": 129.15915005264833,
"children": {
"process_trajectory": {
"total": 3603.874626982194,
"count": 694746,
"self": 3599.5238403741896,
"children": {
"RLTrainer._checkpoint": {
"total": 4.350786608004228,
"count": 20,
"self": 4.350786608004228
}
}
},
"_update_policy": {
"total": 20114.723577488014,
"count": 485,
"self": 1628.560945050216,
"children": {
"TorchPOCAOptimizer.update": {
"total": 18486.162632437798,
"count": 14550,
"self": 18486.162632437798
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.360010153613985e-07,
"count": 1,
"self": 9.360010153613985e-07
},
"TrainerController._save_models": {
"total": 0.20055630400020164,
"count": 1,
"self": 0.0020769100010511465,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1984793939991505,
"count": 1,
"self": 0.1984793939991505
}
}
}
}
}
}
}