{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.931823492050171,
"min": 1.931823492050171,
"max": 3.295708656311035,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 39934.65625,
"min": 15912.376953125,
"max": 134047.03125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 42.530434782608694,
"min": 40.074380165289256,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19564.0,
"min": 3996.0,
"max": 30536.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1392.0737726668938,
"min": 1197.001194479805,
"max": 1410.1028201929319,
"count": 480
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 320176.9677133856,
"min": 2396.0910923381834,
"max": 338264.64338618796,
"count": 480
},
"SoccerTwos.Step.mean": {
"value": 4999986.0,
"min": 9030.0,
"max": 4999986.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999986.0,
"min": 9030.0,
"max": 4999986.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.020223937928676605,
"min": -0.0889531672000885,
"max": 0.17569175362586975,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.631281852722168,
"min": -20.281322479248047,
"max": 32.8543586730957,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02101924456655979,
"min": -0.08862104266881943,
"max": 0.17977046966552734,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.813406944274902,
"min": -20.205596923828125,
"max": 33.6170768737793,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09265327453613281,
"min": -0.5882352941176471,
"max": 0.4976159930229187,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -21.217599868774414,
"min": -51.62679994106293,
"max": 61.36760061979294,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09265327453613281,
"min": -0.5882352941176471,
"max": 0.4976159930229187,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -21.217599868774414,
"min": -51.62679994106293,
"max": 61.36760061979294,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016220482345670462,
"min": 0.010982282368543868,
"max": 0.02318152036362638,
"count": 238
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016220482345670462,
"min": 0.010982282368543868,
"max": 0.02318152036362638,
"count": 238
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10962900544206301,
"min": 1.2445342698204816e-05,
"max": 0.11698166231314341,
"count": 238
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10962900544206301,
"min": 1.2445342698204816e-05,
"max": 0.11698166231314341,
"count": 238
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11299168641368548,
"min": 1.2131652086585139e-05,
"max": 0.11991602629423141,
"count": 238
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11299168641368548,
"min": 1.2131652086585139e-05,
"max": 0.11991602629423141,
"count": 238
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 238
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 238
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 238
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 238
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 238
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 238
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690658924",
"python_version": "3.9.17 (main, Jul 5 2023, 16:17:03) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/eran/opt/anaconda3/envs/rl/bin/mlagents-learn /Users/eran/rl_course/ml-agents/config/poca/SoccerTwos.yaml --env=/Users/eran/rl_course/ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos7 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1690681988"
},
"total": 23063.798682286,
"count": 1,
"self": 0.7454080399984377,
"children": {
"run_training.setup": {
"total": 0.03289832700000006,
"count": 1,
"self": 0.03289832700000006
},
"TrainerController.start_learning": {
"total": 23063.020375919,
"count": 1,
"self": 7.038636560420855,
"children": {
"TrainerController._reset_env": {
"total": 5.367032481000958,
"count": 25,
"self": 5.367032481000958
},
"TrainerController.advance": {
"total": 23050.44275078758,
"count": 336044,
"self": 7.238345269568526,
"children": {
"env_step": {
"total": 5254.720844670348,
"count": 336044,
"self": 4325.264234471794,
"children": {
"SubprocessEnvManager._take_step": {
"total": 925.0752680246542,
"count": 336044,
"self": 41.21437040421495,
"children": {
"TorchPolicy.evaluate": {
"total": 883.8608976204392,
"count": 634836,
"self": 883.8608976204392
}
}
},
"workers": {
"total": 4.381342173899432,
"count": 336044,
"self": 0.0,
"children": {
"worker_root": {
"total": 23046.73958037902,
"count": 336044,
"is_parallel": true,
"self": 19536.17354361685,
"children": {
"steps_from_proto": {
"total": 0.07485022300207511,
"count": 50,
"is_parallel": true,
"self": 0.023650201003051485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.051200021999023626,
"count": 200,
"is_parallel": true,
"self": 0.051200021999023626
}
}
},
"UnityEnvironment.step": {
"total": 3510.4911865391687,
"count": 336044,
"is_parallel": true,
"self": 204.17153747656357,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 120.79371196432899,
"count": 336044,
"is_parallel": true,
"self": 120.79371196432899
},
"communicator.exchange": {
"total": 2597.191372849924,
"count": 336044,
"is_parallel": true,
"self": 2597.191372849924
},
"steps_from_proto": {
"total": 588.3345642483523,
"count": 672088,
"is_parallel": true,
"self": 121.10676272161152,
"children": {
"_process_rank_one_or_two_observation": {
"total": 467.2278015267408,
"count": 2688352,
"is_parallel": true,
"self": 467.2278015267408
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 17788.483560847664,
"count": 336044,
"self": 52.8782545597096,
"children": {
"process_trajectory": {
"total": 1572.0076158219758,
"count": 336044,
"self": 1569.918028740971,
"children": {
"RLTrainer._checkpoint": {
"total": 2.0895870810047654,
"count": 10,
"self": 2.0895870810047654
}
}
},
"_update_policy": {
"total": 16163.597690465978,
"count": 238,
"self": 787.0364935509697,
"children": {
"TorchPOCAOptimizer.update": {
"total": 15376.561196915009,
"count": 7152,
"self": 15376.561196915009
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.879986464511603e-07,
"count": 1,
"self": 9.879986464511603e-07
},
"TrainerController._save_models": {
"total": 0.17195510199962882,
"count": 1,
"self": 0.003491924002446467,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16846317799718236,
"count": 1,
"self": 0.16846317799718236
}
}
}
}
}
}
}