{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.7046122550964355,
"min": 2.6670632362365723,
"max": 3.295757293701172,
"count": 310
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 55650.1015625,
"min": 29854.0859375,
"max": 143241.625,
"count": 310
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 496.1,
"max": 999.0,
"count": 310
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 16636.0,
"max": 23376.0,
"count": 310
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1178.2672953656393,
"min": 1178.241773305686,
"max": 1198.0725899581505,
"count": 86
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2356.5345907312785,
"min": 2356.483546611372,
"max": 16764.473512461092,
"count": 86
},
"SoccerTwos.Step.mean": {
"value": 3099310.0,
"min": 9236.0,
"max": 3099310.0,
"count": 310
},
"SoccerTwos.Step.sum": {
"value": 3099310.0,
"min": 9236.0,
"max": 3099310.0,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 3.326240403112024e-06,
"min": -0.08251701295375824,
"max": 0.003230756614357233,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 3.326240403112024e-05,
"min": -1.480699896812439,
"max": 0.032307565212249756,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 4.220529717713362e-06,
"min": -0.08225759118795395,
"max": 0.002531674224883318,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 4.220529808662832e-05,
"min": -1.4805045127868652,
"max": 0.025316741317510605,
"count": 310
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 310
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.4666666666666667,
"max": 0.30194285086223055,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -8.000799983739853,
"max": 4.227199912071228,
"count": 310
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.4666666666666667,
"max": 0.30194285086223055,
"count": 310
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -8.000799983739853,
"max": 4.227199912071228,
"count": 310
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 310
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 310
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014196703045551354,
"min": 0.011082978248790216,
"max": 0.023002480048065384,
"count": 142
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014196703045551354,
"min": 0.011082978248790216,
"max": 0.023002480048065384,
"count": 142
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 4.825971577915311e-09,
"min": 4.045047408141045e-09,
"max": 0.006606397265568375,
"count": 142
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 4.825971577915311e-09,
"min": 4.045047408141045e-09,
"max": 0.006606397265568375,
"count": 142
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 6.109092032247077e-09,
"min": 4.427689844395862e-09,
"max": 0.006149715169643363,
"count": 142
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 6.109092032247077e-09,
"min": 4.427689844395862e-09,
"max": 0.006149715169643363,
"count": 142
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 142
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 142
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 142
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 142
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 142
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 142
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702827158",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./SoccerTwos.yaml --env=./SoccerTwos.x86_64 --run-id=SoccerTwos_1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702834540"
},
"total": 7382.306381205999,
"count": 1,
"self": 0.14331866899919987,
"children": {
"run_training.setup": {
"total": 0.05478789800008599,
"count": 1,
"self": 0.05478789800008599
},
"TrainerController.start_learning": {
"total": 7382.108274638999,
"count": 1,
"self": 5.606544872985978,
"children": {
"TrainerController._reset_env": {
"total": 5.570854120998774,
"count": 16,
"self": 5.570854120998774
},
"TrainerController.advance": {
"total": 7370.5489595800145,
"count": 202750,
"self": 6.453958368620988,
"children": {
"env_step": {
"total": 6139.760278121176,
"count": 202750,
"self": 4752.688299051529,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1383.3648924447625,
"count": 202750,
"self": 40.21068052804685,
"children": {
"TorchPolicy.evaluate": {
"total": 1343.1542119167157,
"count": 402902,
"self": 1343.1542119167157
}
}
},
"workers": {
"total": 3.7070866248850507,
"count": 202749,
"self": 0.0,
"children": {
"worker_root": {
"total": 7367.403083115713,
"count": 202749,
"is_parallel": true,
"self": 3376.382348381327,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00636034799981644,
"count": 2,
"is_parallel": true,
"self": 0.004144261999044829,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002216086000771611,
"count": 8,
"is_parallel": true,
"self": 0.002216086000771611
}
}
},
"UnityEnvironment.step": {
"total": 0.03994014199997764,
"count": 1,
"is_parallel": true,
"self": 0.0011644820001492917,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008380680001209839,
"count": 1,
"is_parallel": true,
"self": 0.0008380680001209839
},
"communicator.exchange": {
"total": 0.03439905999994153,
"count": 1,
"is_parallel": true,
"self": 0.03439905999994153
},
"steps_from_proto": {
"total": 0.003538531999765837,
"count": 2,
"is_parallel": true,
"self": 0.0006358130003718543,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002902718999393983,
"count": 8,
"is_parallel": true,
"self": 0.002902718999393983
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3990.984828654385,
"count": 202748,
"is_parallel": true,
"self": 252.67103470223992,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 162.07382316129087,
"count": 202748,
"is_parallel": true,
"self": 162.07382316129087
},
"communicator.exchange": {
"total": 2808.1984878539156,
"count": 202748,
"is_parallel": true,
"self": 2808.1984878539156
},
"steps_from_proto": {
"total": 768.0414829369388,
"count": 405496,
"is_parallel": true,
"self": 124.34170448355826,
"children": {
"_process_rank_one_or_two_observation": {
"total": 643.6997784533805,
"count": 1621984,
"is_parallel": true,
"self": 643.6997784533805
}
}
}
}
},
"steps_from_proto": {
"total": 0.03590608000104112,
"count": 30,
"is_parallel": true,
"self": 0.007482484003048739,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.02842359599799238,
"count": 120,
"is_parallel": true,
"self": 0.02842359599799238
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1224.3347230902173,
"count": 202749,
"self": 48.79317560247273,
"children": {
"process_trajectory": {
"total": 351.6710498117436,
"count": 202749,
"self": 350.1722736277443,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4987761839993254,
"count": 6,
"self": 1.4987761839993254
}
}
},
"_update_policy": {
"total": 823.8704976760009,
"count": 142,
"self": 551.3476389289863,
"children": {
"TorchPOCAOptimizer.update": {
"total": 272.52285874701465,
"count": 4260,
"self": 272.52285874701465
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4500001270789653e-06,
"count": 1,
"self": 1.4500001270789653e-06
},
"TrainerController._save_models": {
"total": 0.3819146149999142,
"count": 1,
"self": 0.003658150000774185,
"children": {
"RLTrainer._checkpoint": {
"total": 0.37825646499914,
"count": 1,
"self": 0.37825646499914
}
}
}
}
}
}
}