poca-SoccerTwos/run_logs/timers.json
Alfredo Wijaya Hardjoprawiro
First Push
f5283af
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5658403635025024,
"min": 1.4710252285003662,
"max": 3.2957184314727783,
"count": 1717
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31467.126953125,
"min": 18574.41796875,
"max": 139646.0,
"count": 1717
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 53.391304347826086,
"min": 37.54615384615385,
"max": 999.0,
"count": 1717
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19648.0,
"min": 12572.0,
"max": 28304.0,
"count": 1717
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1521.188407031131,
"min": 1188.270663584517,
"max": 1595.4719318265784,
"count": 1705
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 279898.6668937281,
"min": 2381.3380737970483,
"max": 388796.1561717103,
"count": 1705
},
"SoccerTwos.Step.mean": {
"value": 17169998.0,
"min": 9170.0,
"max": 17169998.0,
"count": 1717
},
"SoccerTwos.Step.sum": {
"value": 17169998.0,
"min": 9170.0,
"max": 17169998.0,
"count": 1717
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.024461986497044563,
"min": -0.1305708885192871,
"max": 0.13727399706840515,
"count": 1717
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.476543426513672,
"min": -26.767030715942383,
"max": 30.604129791259766,
"count": 1717
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.020315561443567276,
"min": -0.1328149437904358,
"max": 0.134805828332901,
"count": 1717
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.717747688293457,
"min": -27.227062225341797,
"max": 30.29873275756836,
"count": 1717
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1717
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1717
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.04840874281085906,
"min": -0.5,
"max": 0.4267529389437507,
"count": 1717
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 8.858799934387207,
"min": -67.09680008888245,
"max": 79.95440047979355,
"count": 1717
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.04840874281085906,
"min": -0.5,
"max": 0.4267529389437507,
"count": 1717
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 8.858799934387207,
"min": -67.09680008888245,
"max": 79.95440047979355,
"count": 1717
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1717
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1717
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01528598769606712,
"min": 0.00972527809208259,
"max": 0.027027319309612115,
"count": 831
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01528598769606712,
"min": 0.00972527809208259,
"max": 0.027027319309612115,
"count": 831
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10846137975653013,
"min": 1.5921545430804448e-06,
"max": 0.1283313696583112,
"count": 831
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10846137975653013,
"min": 1.5921545430804448e-06,
"max": 0.1283313696583112,
"count": 831
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11106423189242681,
"min": 1.6342472804353747e-06,
"max": 0.13056222051382066,
"count": 831
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11106423189242681,
"min": 1.6342472804353747e-06,
"max": 0.13056222051382066,
"count": 831
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 831
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 831
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 831
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 831
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 831
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 831
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700168456",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\alfre\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos3 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1700213347"
},
"total": 44890.34951159998,
"count": 1,
"self": 2.3904775000410154,
"children": {
"run_training.setup": {
"total": 0.12184729997534305,
"count": 1,
"self": 0.12184729997534305
},
"TrainerController.start_learning": {
"total": 44887.83718679997,
"count": 1,
"self": 26.286224765935913,
"children": {
"TrainerController._reset_env": {
"total": 26.348746099625714,
"count": 86,
"self": 26.348746099625714
},
"TrainerController.advance": {
"total": 44834.73868103442,
"count": 1187027,
"self": 24.413343121646903,
"children": {
"env_step": {
"total": 18815.237789576175,
"count": 1187027,
"self": 14979.65202584071,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3819.953066346934,
"count": 1187027,
"self": 136.37660552246962,
"children": {
"TorchPolicy.evaluate": {
"total": 3683.5764608244644,
"count": 2159910,
"self": 3683.5764608244644
}
}
},
"workers": {
"total": 15.632697388529778,
"count": 1187027,
"self": 0.0,
"children": {
"worker_root": {
"total": 44831.930845141294,
"count": 1187027,
"is_parallel": true,
"self": 32809.4089613168,
"children": {
"steps_from_proto": {
"total": 0.13590089965146035,
"count": 172,
"is_parallel": true,
"self": 0.02872670057695359,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.10717419907450676,
"count": 688,
"is_parallel": true,
"self": 0.10717419907450676
}
}
},
"UnityEnvironment.step": {
"total": 12022.38598292484,
"count": 1187027,
"is_parallel": true,
"self": 519.5173937674845,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 470.93044068908785,
"count": 1187027,
"is_parallel": true,
"self": 470.93044068908785
},
"communicator.exchange": {
"total": 9405.333153780317,
"count": 1187027,
"is_parallel": true,
"self": 9405.333153780317
},
"steps_from_proto": {
"total": 1626.6049946879502,
"count": 2374054,
"is_parallel": true,
"self": 331.6700939015718,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1294.9349007863784,
"count": 9496216,
"is_parallel": true,
"self": 1294.9349007863784
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 25995.087548336596,
"count": 1187027,
"self": 192.74794052378275,
"children": {
"process_trajectory": {
"total": 4336.063402813743,
"count": 1187027,
"self": 4331.903012313531,
"children": {
"RLTrainer._checkpoint": {
"total": 4.160390500212088,
"count": 34,
"self": 4.160390500212088
}
}
},
"_update_policy": {
"total": 21466.27620499907,
"count": 831,
"self": 2600.887148299138,
"children": {
"TorchPOCAOptimizer.update": {
"total": 18865.389056699933,
"count": 24933,
"self": 18865.389056699933
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.399924702942371e-06,
"count": 1,
"self": 5.399924702942371e-06
},
"TrainerController._save_models": {
"total": 0.4635295000625774,
"count": 1,
"self": 0.022573400055989623,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4409561000065878,
"count": 1,
"self": 0.4409561000065878
}
}
}
}
}
}
}