{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2957091331481934,
"min": 3.2957088947296143,
"max": 3.2957091331481934,
"count": 2
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 105462.6953125,
"min": 105462.6875,
"max": 105462.6953125,
"count": 2
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 820.5555555555555,
"min": 747.6,
"max": 820.5555555555555,
"count": 2
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 29540.0,
"min": 29540.0,
"max": 29904.0,
"count": 2
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1204.3260408183755,
"min": 1202.234955112457,
"max": 1204.3260408183755,
"count": 2
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4817.304163273502,
"min": 4817.304163273502,
"max": 9617.879640899657,
"count": 2
},
"SoccerTwos.Step.mean": {
"value": 19760.0,
"min": 9972.0,
"max": 19760.0,
"count": 2
},
"SoccerTwos.Step.sum": {
"value": 19760.0,
"min": 9972.0,
"max": 19760.0,
"count": 2
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.05389903113245964,
"min": 0.053897786885499954,
"max": 0.05389903113245964,
"count": 2
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.7006874084472656,
"min": 0.7006874084472656,
"max": 0.8084667921066284,
"count": 2
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.053914885967969894,
"min": 0.053908221423625946,
"max": 0.053914885967969894,
"count": 2
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.7008935213088989,
"min": 0.7008935213088989,
"max": 0.8086233139038086,
"count": 2
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.24713846353384164,
"min": 0.24713846353384164,
"max": 0.3353066682815552,
"count": 2
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 3.2128000259399414,
"min": 3.2128000259399414,
"max": 5.029600024223328,
"count": 2
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.24713846353384164,
"min": 0.24713846353384164,
"max": 0.3353066682815552,
"count": 2
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 3.2128000259399414,
"min": 3.2128000259399414,
"max": 5.029600024223328,
"count": 2
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679310557",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./mlagents/training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679310771"
},
"total": 213.63438585899985,
"count": 1,
"self": 0.004880209000020841,
"children": {
"run_training.setup": {
"total": 0.10676537499966798,
"count": 1,
"self": 0.10676537499966798
},
"TrainerController.start_learning": {
"total": 213.52274027500016,
"count": 1,
"self": 0.06978656700766805,
"children": {
"TrainerController._reset_env": {
"total": 6.874114396000095,
"count": 1,
"self": 6.874114396000095
},
"TrainerController.advance": {
"total": 206.3656459959925,
"count": 2007,
"self": 0.056377601015810797,
"children": {
"env_step": {
"total": 49.52503003998436,
"count": 2007,
"self": 39.26393838098193,
"children": {
"SubprocessEnvManager._take_step": {
"total": 10.225431635991754,
"count": 2007,
"self": 0.293289149984048,
"children": {
"TorchPolicy.evaluate": {
"total": 9.932142486007706,
"count": 4000,
"self": 9.932142486007706
}
}
},
"workers": {
"total": 0.03566002301067783,
"count": 2007,
"self": 0.0,
"children": {
"worker_root": {
"total": 70.43364111999199,
"count": 2007,
"is_parallel": true,
"self": 37.895469893981044,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008032550000280025,
"count": 2,
"is_parallel": true,
"self": 0.005440920000637561,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002591629999642464,
"count": 8,
"is_parallel": true,
"self": 0.002591629999642464
}
}
},
"UnityEnvironment.step": {
"total": 0.03891077799971754,
"count": 1,
"is_parallel": true,
"self": 0.0009908909996738657,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008593200000177603,
"count": 1,
"is_parallel": true,
"self": 0.0008593200000177603
},
"communicator.exchange": {
"total": 0.03214213799992649,
"count": 1,
"is_parallel": true,
"self": 0.03214213799992649
},
"steps_from_proto": {
"total": 0.004918429000099422,
"count": 2,
"is_parallel": true,
"self": 0.0006280000006881892,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004290428999411233,
"count": 8,
"is_parallel": true,
"self": 0.004290428999411233
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 32.538171226010945,
"count": 2006,
"is_parallel": true,
"self": 1.9469291459959095,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.1429492400129675,
"count": 2006,
"is_parallel": true,
"self": 1.1429492400129675
},
"communicator.exchange": {
"total": 23.64912555699766,
"count": 2006,
"is_parallel": true,
"self": 23.64912555699766
},
"steps_from_proto": {
"total": 5.799167283004408,
"count": 4012,
"is_parallel": true,
"self": 1.0706966910133815,
"children": {
"_process_rank_one_or_two_observation": {
"total": 4.728470591991027,
"count": 16048,
"is_parallel": true,
"self": 4.728470591991027
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 156.78423835499234,
"count": 2007,
"self": 0.3064260689839102,
"children": {
"process_trajectory": {
"total": 20.03202570000849,
"count": 2007,
"self": 20.03202570000849
},
"_update_policy": {
"total": 136.44578658599994,
"count": 1,
"self": 4.7922829409999395,
"children": {
"TorchPOCAOptimizer.update": {
"total": 131.653503645,
"count": 37,
"self": 131.653503645
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.890000001178123e-07,
"count": 1,
"self": 9.890000001178123e-07
},
"TrainerController._save_models": {
"total": 0.21319232699988788,
"count": 1,
"self": 0.0014776899997741566,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21171463700011373,
"count": 1,
"self": 0.21171463700011373
}
}
}
}
}
}
}