{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6335724592208862,
"min": 1.6277825832366943,
"max": 3.279508352279663,
"count": 1005
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34344.2265625,
"min": 14908.626953125,
"max": 107115.140625,
"count": 1005
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 68.78082191780823,
"min": 48.8494623655914,
"max": 999.0,
"count": 1005
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20084.0,
"min": 11124.0,
"max": 30732.0,
"count": 1005
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1601.2689811499124,
"min": 1194.8797166991462,
"max": 1637.0165205499825,
"count": 970
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 233785.27124788723,
"min": 2389.8446981820366,
"max": 311765.5772926273,
"count": 970
},
"SoccerTwos.Step.mean": {
"value": 10119940.0,
"min": 69530.0,
"max": 10119940.0,
"count": 1006
},
"SoccerTwos.Step.sum": {
"value": 10119940.0,
"min": 69530.0,
"max": 10119940.0,
"count": 1006
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.05142134055495262,
"min": -0.12232402712106705,
"max": 0.2670028805732727,
"count": 1006
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.456094264984131,
"min": -17.981632232666016,
"max": 31.805038452148438,
"count": 1006
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0473678782582283,
"min": -0.1268015056848526,
"max": 0.26963484287261963,
"count": 1006
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.868342399597168,
"min": -18.639822006225586,
"max": 32.35247802734375,
"count": 1006
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1006
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1006
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.06106206630838328,
"min": -0.658079997698466,
"max": 0.6973395333083909,
"count": 1006
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -8.853999614715576,
"min": -62.67580032348633,
"max": 58.319599986076355,
"count": 1006
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.06106206630838328,
"min": -0.658079997698466,
"max": 0.6973395333083909,
"count": 1006
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -8.853999614715576,
"min": -62.67580032348633,
"max": 58.319599986076355,
"count": 1006
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1006
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1006
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.020108305018705627,
"min": 0.010677788554069896,
"max": 0.02353180558420718,
"count": 482
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.020108305018705627,
"min": 0.010677788554069896,
"max": 0.02353180558420718,
"count": 482
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09022632092237473,
"min": 4.517290797897052e-06,
"max": 0.11021627436081569,
"count": 482
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09022632092237473,
"min": 4.517290797897052e-06,
"max": 0.11021627436081569,
"count": 482
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09162547215819358,
"min": 4.299989313949482e-06,
"max": 0.11155753905574481,
"count": 482
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09162547215819358,
"min": 4.299989313949482e-06,
"max": 0.11155753905574481,
"count": 482
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 482
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 482
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 482
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 482
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 482
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 482
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724520930",
"python_version": "3.10.12 (main, Jul 5 2023, 15:34:07) [Clang 14.0.6 ]",
"command_line_arguments": "/opt/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2",
"numpy_version": "1.23.5",
"end_time_seconds": "1724667591"
},
"total": 90074.27497943201,
"count": 1,
"self": 0.13707587201497518,
"children": {
"run_training.setup": {
"total": 0.035481104000609776,
"count": 1,
"self": 0.035481104000609776
},
"TrainerController.start_learning": {
"total": 90074.102422456,
"count": 1,
"self": 17.545740436864435,
"children": {
"TrainerController._reset_env": {
"total": 9.719931646970508,
"count": 51,
"self": 9.719931646970508
},
"TrainerController.advance": {
"total": 90046.54289305216,
"count": 679004,
"self": 16.208958074756083,
"children": {
"env_step": {
"total": 73131.67721376443,
"count": 679004,
"self": 70759.80930916585,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2360.357838527425,
"count": 679004,
"self": 102.18999144615645,
"children": {
"TorchPolicy.evaluate": {
"total": 2258.1678470812685,
"count": 1272426,
"self": 2258.1678470812685
}
}
},
"workers": {
"total": 11.510066071146866,
"count": 679003,
"self": 0.0,
"children": {
"worker_root": {
"total": 90036.9012921317,
"count": 679003,
"is_parallel": true,
"self": 21275.388206758682,
"children": {
"steps_from_proto": {
"total": 0.16437094403318042,
"count": 102,
"is_parallel": true,
"self": 0.023133671023060742,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.14123727301011968,
"count": 408,
"is_parallel": true,
"self": 0.14123727301011968
}
}
},
"UnityEnvironment.step": {
"total": 68761.34871442898,
"count": 679003,
"is_parallel": true,
"self": 213.01454340650525,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1349.2852782567497,
"count": 679003,
"is_parallel": true,
"self": 1349.2852782567497
},
"communicator.exchange": {
"total": 64792.9077759496,
"count": 679003,
"is_parallel": true,
"self": 64792.9077759496
},
"steps_from_proto": {
"total": 2406.141116816123,
"count": 1358006,
"is_parallel": true,
"self": 331.60093752413104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2074.540179291992,
"count": 5432024,
"is_parallel": true,
"self": 2074.540179291992
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 16898.65672121297,
"count": 679003,
"self": 126.47555832953367,
"children": {
"process_trajectory": {
"total": 2235.4793822993934,
"count": 679003,
"self": 2229.025669736394,
"children": {
"RLTrainer._checkpoint": {
"total": 6.453712562999499,
"count": 20,
"self": 6.453712562999499
}
}
},
"_update_policy": {
"total": 14536.701780584044,
"count": 482,
"self": 1582.3004261850965,
"children": {
"TorchPOCAOptimizer.update": {
"total": 12954.401354398948,
"count": 14472,
"self": 12954.401354398948
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.048997652716935e-06,
"count": 1,
"self": 4.048997652716935e-06
},
"TrainerController._save_models": {
"total": 0.2938532709958963,
"count": 1,
"self": 0.00209102600638289,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2917622449895134,
"count": 1,
"self": 0.2917622449895134
}
}
}
}
}
}
}