{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.0323781967163086,
"min": 3.028630256652832,
"max": 3.1578166484832764,
"count": 46
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36582.609375,
"min": 8456.013671875,
"max": 63516.81640625,
"count": 46
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 180.64285714285714,
"min": 122.0952380952381,
"max": 846.6666666666666,
"count": 46
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 10116.0,
"min": 6944.0,
"max": 12312.0,
"count": 46
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1216.768181989199,
"min": 1196.7803876904848,
"max": 1216.768181989199,
"count": 46
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 34069.50909569758,
"min": 2396.013634548928,
"max": 51074.11271406362,
"count": 46
},
"SoccerTwos.Step.mean": {
"value": 729870.0,
"min": 504585.0,
"max": 729870.0,
"count": 46
},
"SoccerTwos.Step.sum": {
"value": 729870.0,
"min": 504585.0,
"max": 729870.0,
"count": 46
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.01322362944483757,
"min": -0.03081587702035904,
"max": 0.03335408866405487,
"count": 46
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.38348525762557983,
"min": -0.549109935760498,
"max": 0.6337276697158813,
"count": 46
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.015240748412907124,
"min": -0.036322277039289474,
"max": 0.03474472835659981,
"count": 46
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.441981703042984,
"min": -0.7264455556869507,
"max": 0.6601498126983643,
"count": 46
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 46
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 46
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09548965404773581,
"min": -0.7134461540442246,
"max": 0.5550461480250726,
"count": 46
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.7691999673843384,
"min": -11.96839988231659,
"max": 14.431199848651886,
"count": 46
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09548965404773581,
"min": -0.7134461540442246,
"max": 0.5550461480250726,
"count": 46
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.7691999673843384,
"min": -11.96839988231659,
"max": 14.431199848651886,
"count": 46
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 46
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 46
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019903309404617174,
"min": 0.013024471352400724,
"max": 0.021504139783792196,
"count": 10
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019903309404617174,
"min": 0.013024471352400724,
"max": 0.021504139783792196,
"count": 10
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.027103863842785358,
"min": 0.008610093407332898,
"max": 0.027103863842785358,
"count": 10
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.027103863842785358,
"min": 0.008610093407332898,
"max": 0.027103863842785358,
"count": 10
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.028238827114303908,
"min": 0.008656914609794815,
"max": 0.028238827114303908,
"count": 10
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.028238827114303908,
"min": 0.008656914609794815,
"max": 0.028238827114303908,
"count": 10
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 10
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 10
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 10
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 10
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 10
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694249041",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\F:\\ProgramData\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1694250256"
},
"total": 1214.1833425999998,
"count": 1,
"self": 0.025925999999799387,
"children": {
"run_training.setup": {
"total": 0.13912049999999976,
"count": 1,
"self": 0.13912049999999976
},
"TrainerController.start_learning": {
"total": 1214.0182961,
"count": 1,
"self": 0.509170700007644,
"children": {
"TrainerController._reset_env": {
"total": 5.146971900000045,
"count": 3,
"self": 5.146971900000045
},
"TrainerController.advance": {
"total": 1208.1020323999924,
"count": 15220,
"self": 0.4884746999850904,
"children": {
"env_step": {
"total": 368.8959817999953,
"count": 15220,
"self": 268.22477900000547,
"children": {
"SubprocessEnvManager._take_step": {
"total": 100.3731970999918,
"count": 15220,
"self": 3.480500099981171,
"children": {
"TorchPolicy.evaluate": {
"total": 96.89269700001063,
"count": 29800,
"self": 96.89269700001063
}
}
},
"workers": {
"total": 0.2980056999980025,
"count": 15220,
"self": 0.0,
"children": {
"worker_root": {
"total": 1152.9434599000028,
"count": 15220,
"is_parallel": true,
"self": 946.5915738999968,
"children": {
"steps_from_proto": {
"total": 0.008539299999917205,
"count": 6,
"is_parallel": true,
"self": 0.002213200000059423,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006326099999857782,
"count": 24,
"is_parallel": true,
"self": 0.006326099999857782
}
}
},
"UnityEnvironment.step": {
"total": 206.34334670000612,
"count": 15220,
"is_parallel": true,
"self": 11.742921100015451,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.293605799993244,
"count": 15220,
"is_parallel": true,
"self": 12.293605799993244
},
"communicator.exchange": {
"total": 145.6519815999885,
"count": 15220,
"is_parallel": true,
"self": 145.6519815999885
},
"steps_from_proto": {
"total": 36.65483820000894,
"count": 30440,
"is_parallel": true,
"self": 8.078218800022832,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.576619399986107,
"count": 121760,
"is_parallel": true,
"self": 28.576619399986107
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 838.7175759000119,
"count": 15220,
"self": 3.7937626000210685,
"children": {
"process_trajectory": {
"total": 102.25514999999076,
"count": 15220,
"self": 102.25514999999076
},
"_update_policy": {
"total": 732.6686633,
"count": 11,
"self": 64.43444000000022,
"children": {
"TorchPOCAOptimizer.update": {
"total": 668.2342232999998,
"count": 322,
"self": 668.2342232999998
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3999999737279722e-06,
"count": 1,
"self": 1.3999999737279722e-06
},
"TrainerController._save_models": {
"total": 0.2601196999999047,
"count": 1,
"self": 0.012864399999898524,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24725530000000617,
"count": 1,
"self": 0.24725530000000617
}
}
}
}
}
}
}