{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 1.7379868030548096, "min": 1.6975775957107544, "max": 3.201785087585449, "count": 583 }, "SoccerTwos.Policy.Entropy.sum": { "value": 36205.7421875, "min": 17922.34375, "max": 103413.0, "count": 583 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 51.2680412371134, "min": 40.17355371900826, "max": 757.0, "count": 583 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 19892.0, "min": 1088.0, "max": 22836.0, "count": 583 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1608.4811955990897, "min": 1188.027129762361, "max": 1630.5792162932098, "count": 583 }, "SoccerTwos.Self-play.ELO.sum": { "value": 312045.3519462234, "min": 4793.198068949542, "max": 389355.7326797779, "count": 583 }, "SoccerTwos.Step.mean": { "value": 6319896.0, "min": 499892.0, "max": 6319896.0, "count": 583 }, "SoccerTwos.Step.sum": { "value": 6319896.0, "min": 499892.0, "max": 6319896.0, "count": 583 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.028669677674770355, "min": -0.10469229519367218, "max": 0.2092435210943222, "count": 583 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -5.533247947692871, "min": -20.62438201904297, "max": 29.339309692382812, "count": 583 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.033311404287815094, "min": -0.10439346730709076, "max": 0.20841465890407562, "count": 583 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -6.42910099029541, "min": -20.565513610839844, "max": 29.39687156677246, "count": 583 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 583 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 583 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.09583005077480652, "min": -0.6111111111111112, "max": 0.8744666576385498, "count": 583 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -18.49519979953766, "min": -55.67280024290085, "max": 52.822800040245056, "count": 583 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.09583005077480652, "min": -0.6111111111111112, "max": 0.8744666576385498, "count": 583 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -18.49519979953766, "min": -55.67280024290085, "max": 52.822800040245056, "count": 583 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 583 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 583 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.016364924714434893, "min": 0.010607179280486888, "max": 0.024574437778210268, "count": 282 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.016364924714434893, "min": 0.010607179280486888, "max": 0.024574437778210268, "count": 282 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.11905259663860003, "min": 0.006973742957537373, "max": 0.13122673953572908, "count": 282 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.11905259663860003, "min": 0.006973742957537373, "max": 0.13122673953572908, "count": 282 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.12101921414335569, "min": 0.007167737962057193, "max": 0.13467016195257506, "count": 282 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.12101921414335569, "min": 0.007167737962057193, "max": 0.13467016195257506, "count": 282 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 282 }, 
"SoccerTwos.Policy.LearningRate.sum": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 282 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 282 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 282 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 282 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 282 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1682476503", "python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]", "command_line_arguments": "C:\\ProgramData\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.0.0+cpu", "numpy_version": "1.21.2", "end_time_seconds": "1682490007" }, "total": 13503.772477, "count": 1, "self": 0.12243379999927129, "children": { "run_training.setup": { "total": 0.08656180000000013, "count": 1, "self": 0.08656180000000013 }, "TrainerController.start_learning": { "total": 13503.5634814, "count": 1, "self": 8.222062799226478, "children": { "TrainerController._reset_env": { "total": 3.2095754000001326, "count": 31, "self": 3.2095754000001326 }, "TrainerController.advance": { "total": 13492.012980200774, "count": 404820, "self": 7.82166130096266, "children": { "env_step": { "total": 5241.071093200438, "count": 404820, "self": 4070.47834919977, "children": { "SubprocessEnvManager._take_step": { "total": 1165.7801238002407, "count": 404820, "self": 41.74086990063961, "children": { "TorchPolicy.evaluate": { "total": 1124.0392538996011, "count": 732126, "self": 1124.0392538996011 } } }, "workers": { "total": 4.812620200426859, "count": 404820, "self": 0.0, "children": { "worker_root": { "total": 13486.237458999596, "count": 404820, "is_parallel": true, "self": 10222.436056999937, "children": { "steps_from_proto": { "total": 0.042839100005195974, "count": 62, "is_parallel": true, "self": 0.00895909999917155, "children": { "_process_rank_one_or_two_observation": { "total": 0.033880000006024424, "count": 248, "is_parallel": true, "self": 0.033880000006024424 } } }, "UnityEnvironment.step": { "total": 3263.7585628996544, "count": 404820, "is_parallel": true, "self": 163.97235129948922, "children": { "UnityEnvironment._generate_step_input": { "total": 132.70147319994555, "count": 404820, "is_parallel": true, "self": 132.70147319994555 }, "communicator.exchange": { "total": 2435.3904713002435, "count": 404820, "is_parallel": true, "self": 2435.3904713002435 }, "steps_from_proto": { "total": 531.6942670999761, "count": 809640, "is_parallel": true, "self": 112.17390449970418, "children": { "_process_rank_one_or_two_observation": { "total": 419.5203626002719, "count": 3238560, "is_parallel": true, "self": 419.5203626002719 } } } } } } } } } } }, "trainer_advance": { "total": 8243.120225699373, "count": 404820, "self": 57.639064598825826, "children": { "process_trajectory": { "total": 1572.4551873005264, "count": 404820, "self": 1571.0837435005278, "children": { "RLTrainer._checkpoint": { "total": 1.3714437999985565, "count": 12, 
"self": 1.3714437999985565 } } }, "_update_policy": { "total": 6613.02597380002, "count": 283, "self": 767.7482619000102, "children": { "TorchPOCAOptimizer.update": { "total": 5845.27771190001, "count": 8467, "self": 5845.27771190001 } } } } } } }, "trainer_threads": { "total": 8.999995770864189e-07, "count": 1, "self": 8.999995770864189e-07 }, "TrainerController._save_models": { "total": 0.11886210000011488, "count": 1, "self": 0.008048300000154995, "children": { "RLTrainer._checkpoint": { "total": 0.11081379999995988, "count": 1, "self": 0.11081379999995988 } } } } } } }