{ "name": "root", "gauges": { "SoccerTwos.Policy.Entropy.mean": { "value": 3.208261251449585, "min": 3.1911509037017822, "max": 3.2957277297973633, "count": 50 }, "SoccerTwos.Policy.Entropy.sum": { "value": 31415.294921875, "min": 28115.021484375, "max": 118325.5390625, "count": 50 }, "SoccerTwos.Environment.EpisodeLength.mean": { "value": 929.0, "min": 396.25, "max": 999.0, "count": 50 }, "SoccerTwos.Environment.EpisodeLength.sum": { "value": 18580.0, "min": 14872.0, "max": 25528.0, "count": 50 }, "SoccerTwos.Self-play.ELO.mean": { "value": 1201.3297316570142, "min": 1201.3297316570142, "max": 1210.067407045527, "count": 42 }, "SoccerTwos.Self-play.ELO.sum": { "value": 2402.6594633140285, "min": 2402.6594633140285, "max": 19316.405024047865, "count": 42 }, "SoccerTwos.Step.mean": { "value": 499896.0, "min": 9782.0, "max": 499896.0, "count": 50 }, "SoccerTwos.Step.sum": { "value": 499896.0, "min": 9782.0, "max": 499896.0, "count": 50 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": { "value": -0.003555102040991187, "min": -0.06530507653951645, "max": 0.004243441391736269, "count": 50 }, "SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": { "value": -0.03910612314939499, "min": -0.9795331954956055, "max": 0.05092129483819008, "count": 50 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.mean": { "value": -0.0029598837718367577, "min": -0.06527626514434814, "max": 0.0018671752186492085, "count": 50 }, "SoccerTwos.Policy.ExtrinsicValueEstimate.sum": { "value": -0.03255872055888176, "min": -0.9791319370269775, "max": 0.02240610308945179, "count": 50 }, "SoccerTwos.Environment.CumulativeReward.mean": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 50 }, "SoccerTwos.Environment.CumulativeReward.sum": { "value": 0.0, "min": 0.0, "max": 0.0, "count": 50 }, "SoccerTwos.Policy.ExtrinsicReward.mean": { "value": -0.15001818266781894, "min": -0.48276666676004726, "max": 0.26585714306150166, "count": 50 }, "SoccerTwos.Policy.ExtrinsicReward.sum": { "value": -1.6502000093460083, "min": -5.868800044059753, "max": 3.722000002861023, "count": 50 }, "SoccerTwos.Environment.GroupCumulativeReward.mean": { "value": -0.15001818266781894, "min": -0.48276666676004726, "max": 0.26585714306150166, "count": 50 }, "SoccerTwos.Environment.GroupCumulativeReward.sum": { "value": -1.6502000093460083, "min": -5.868800044059753, "max": 3.722000002861023, "count": 50 }, "SoccerTwos.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 50 }, "SoccerTwos.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 50 }, "SoccerTwos.Losses.PolicyLoss.mean": { "value": 0.01261608124380776, "min": 0.01241865698248148, "max": 0.020427995805706207, "count": 23 }, "SoccerTwos.Losses.PolicyLoss.sum": { "value": 0.01261608124380776, "min": 0.01241865698248148, "max": 0.020427995805706207, "count": 23 }, "SoccerTwos.Losses.ValueLoss.mean": { "value": 0.002159492698653291, "min": 4.244034444127465e-05, "max": 0.00495028196213146, "count": 23 }, "SoccerTwos.Losses.ValueLoss.sum": { "value": 0.002159492698653291, "min": 4.244034444127465e-05, "max": 0.00495028196213146, "count": 23 }, "SoccerTwos.Losses.BaselineLoss.mean": { "value": 0.0021873546919475, "min": 4.413356667403908e-05, "max": 0.005093085331221422, "count": 23 }, "SoccerTwos.Losses.BaselineLoss.sum": { "value": 0.0021873546919475, "min": 4.413356667403908e-05, "max": 0.005093085331221422, "count": 23 }, "SoccerTwos.Policy.LearningRate.mean": { "value": 0.0003, "min": 0.0003, "max": 0.0003, "count": 23 }, "SoccerTwos.Policy.LearningRate.sum": { "value": 
0.0003, "min": 0.0003, "max": 0.0003, "count": 23 }, "SoccerTwos.Policy.Epsilon.mean": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 23 }, "SoccerTwos.Policy.Epsilon.sum": { "value": 0.20000000000000007, "min": 0.20000000000000007, "max": 0.20000000000000007, "count": 23 }, "SoccerTwos.Policy.Beta.mean": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 23 }, "SoccerTwos.Policy.Beta.sum": { "value": 0.005000000000000001, "min": 0.005000000000000001, "max": 0.005000000000000001, "count": 23 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1686977038", "python_version": "3.9.5 (tags/v3.9.5:0a7dcbd, May 3 2021, 17:27:52) [MSC v.1928 64 bit (AMD64)]", "command_line_arguments": "C:\\Users\\aga31\\AppData\\Local\\Programs\\Python\\Python39\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.0.0+cu117", "numpy_version": "1.21.2", "end_time_seconds": "1686977871" }, "total": 833.5216204, "count": 1, "self": 0.2891715999999178, "children": { "run_training.setup": { "total": 0.16286380000000023, "count": 1, "self": 0.16286380000000023 }, "TrainerController.start_learning": { "total": 833.0695850000001, "count": 1, "self": 0.6251623999977483, "children": { "TrainerController._reset_env": { "total": 8.668626199999917, "count": 3, "self": 8.668626199999917 }, "TrainerController.advance": { "total": 823.6053596000023, "count": 32612, "self": 0.6497481999994079, "children": { "env_step": { "total": 634.3756798999941, "count": 32612, "self": 381.63399099998793, "children": { "SubprocessEnvManager._take_step": { "total": 252.33211850000092, "count": 32612, "self": 4.205996300000578, "children": { "TorchPolicy.evaluate": { "total": 248.12612220000034, "count": 64720, "self": 248.12612220000034 } } }, "workers": { "total": 0.40957040000525247, "count": 32612, "self": 0.0, "children": { "worker_root": { "total": 826.4121929999958, "count": 32612, "is_parallel": true, "self": 521.0881473999835, "children": { "steps_from_proto": { "total": 0.0049619999999928055, "count": 6, "is_parallel": true, "self": 0.0010586000001540086, "children": { "_process_rank_one_or_two_observation": { "total": 0.003903399999838797, "count": 24, "is_parallel": true, "self": 0.003903399999838797 } } }, "UnityEnvironment.step": { "total": 305.3190836000123, "count": 32612, "is_parallel": true, "self": 15.160304700021811, "children": { "UnityEnvironment._generate_step_input": { "total": 11.948850100001765, "count": 32612, "is_parallel": true, "self": 11.948850100001765 }, "communicator.exchange": { "total": 231.0126211999953, "count": 32612, "is_parallel": true, "self": 231.0126211999953 }, "steps_from_proto": { "total": 47.19730759999339, "count": 65224, "is_parallel": true, "self": 10.178831499986728, "children": { "_process_rank_one_or_two_observation": { "total": 37.01847610000666, "count": 260896, "is_parallel": true, "self": 37.01847610000666 } } } } } } } } } } }, "trainer_advance": { "total": 188.57993150000883, "count": 32612, "self": 5.1093000000079485, "children": { "process_trajectory": { "total": 65.37371530000095, "count": 32612, "self": 65.10907530000097, "children": { "RLTrainer._checkpoint": { "total": 0.2646399999999858, "count": 1, "self": 0.2646399999999858 } } }, 
"_update_policy": { "total": 118.09691619999992, "count": 23, "self": 63.96815139999962, "children": { "TorchPOCAOptimizer.update": { "total": 54.128764800000305, "count": 690, "self": 54.128764800000305 } } } } } } }, "TrainerController._save_models": { "total": 0.17043680000006134, "count": 1, "self": 2.9300000051080133e-05, "children": { "RLTrainer._checkpoint": { "total": 0.17040750000001026, "count": 1, "self": 0.17040750000001026 } } } } } } }