{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.603534460067749,
"min": 1.5445671081542969,
"max": 3.2957382202148438,
"count": 1117
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31557.55859375,
"min": 25951.4296875,
"max": 128153.71875,
"count": 1117
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 41.6578947368421,
"min": 37.34615384615385,
"max": 999.0,
"count": 1117
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18996.0,
"min": 14572.0,
"max": 27108.0,
"count": 1117
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1592.5233640684746,
"min": 1197.7046097348552,
"max": 1610.753297285041,
"count": 1089
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 363095.3270076122,
"min": 2395.4092194697105,
"max": 415747.5658378394,
"count": 1089
},
"SoccerTwos.Step.mean": {
"value": 11169974.0,
"min": 9570.0,
"max": 11169974.0,
"count": 1117
},
"SoccerTwos.Step.sum": {
"value": 11169974.0,
"min": 9570.0,
"max": 11169974.0,
"count": 1117
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.033499181270599365,
"min": -0.11869743466377258,
"max": 0.15602651238441467,
"count": 1117
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -7.637813568115234,
"min": -21.46139907836914,
"max": 22.82703399658203,
"count": 1117
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02998179756104946,
"min": -0.12342483550310135,
"max": 0.14840447902679443,
"count": 1117
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.835849761962891,
"min": -21.431331634521484,
"max": 22.890085220336914,
"count": 1117
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1117
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1117
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09124210633729633,
"min": -0.4438857152348473,
"max": 0.479200002759002,
"count": 1117
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -20.803200244903564,
"min": -60.52840006351471,
"max": 67.16500049829483,
"count": 1117
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09124210633729633,
"min": -0.4438857152348473,
"max": 0.479200002759002,
"count": 1117
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -20.803200244903564,
"min": -60.52840006351471,
"max": 67.16500049829483,
"count": 1117
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1117
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1117
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.020101825617409,
"min": 0.011153122591106996,
"max": 0.023868167467298917,
"count": 539
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.020101825617409,
"min": 0.011153122591106996,
"max": 0.023868167467298917,
"count": 539
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11902831718325615,
"min": 2.577112907905151e-06,
"max": 0.1321904664238294,
"count": 539
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11902831718325615,
"min": 2.577112907905151e-06,
"max": 0.1321904664238294,
"count": 539
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.12089589461684228,
"min": 5.975091554925408e-06,
"max": 0.13606064369281132,
"count": 539
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.12089589461684228,
"min": 5.975091554925408e-06,
"max": 0.13606064369281132,
"count": 539
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 539
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 539
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 539
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 539
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 539
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 539
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707128678",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/abragin/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707147791"
},
"total": 19113.665000750218,
"count": 1,
"self": 0.10705320304259658,
"children": {
"run_training.setup": {
"total": 0.00941597018390894,
"count": 1,
"self": 0.00941597018390894
},
"TrainerController.start_learning": {
"total": 19113.54853157699,
"count": 1,
"self": 12.653861391358078,
"children": {
"TrainerController._reset_env": {
"total": 2.53149905288592,
"count": 56,
"self": 2.53149905288592
},
"TrainerController.advance": {
"total": 19098.252023634966,
"count": 768562,
"self": 10.504401659127325,
"children": {
"env_step": {
"total": 7862.435224980116,
"count": 768562,
"self": 6292.333217664156,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1562.6804287093692,
"count": 768562,
"self": 58.20337650086731,
"children": {
"TorchPolicy.evaluate": {
"total": 1504.4770522085018,
"count": 1406722,
"self": 1504.4770522085018
}
}
},
"workers": {
"total": 7.421578606590629,
"count": 768561,
"self": 0.0,
"children": {
"worker_root": {
"total": 19099.829939397983,
"count": 768561,
"is_parallel": true,
"self": 14048.566497283522,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019616824574768543,
"count": 2,
"is_parallel": true,
"self": 0.0005305069498717785,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014311755076050758,
"count": 8,
"is_parallel": true,
"self": 0.0014311755076050758
}
}
},
"UnityEnvironment.step": {
"total": 0.016289445105940104,
"count": 1,
"is_parallel": true,
"self": 0.0003385818563401699,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034201890230178833,
"count": 1,
"is_parallel": true,
"self": 0.00034201890230178833
},
"communicator.exchange": {
"total": 0.014572937972843647,
"count": 1,
"is_parallel": true,
"self": 0.014572937972843647
},
"steps_from_proto": {
"total": 0.0010359063744544983,
"count": 2,
"is_parallel": true,
"self": 0.00022473791614174843,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008111684583127499,
"count": 8,
"is_parallel": true,
"self": 0.0008111684583127499
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5051.167801615316,
"count": 768560,
"is_parallel": true,
"self": 308.7718152431771,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 209.96318291453645,
"count": 768560,
"is_parallel": true,
"self": 209.96318291453645
},
"communicator.exchange": {
"total": 3635.2505905986764,
"count": 768560,
"is_parallel": true,
"self": 3635.2505905986764
},
"steps_from_proto": {
"total": 897.1822128589265,
"count": 1537120,
"is_parallel": true,
"self": 168.52722144313157,
"children": {
"_process_rank_one_or_two_observation": {
"total": 728.6549914157949,
"count": 6148480,
"is_parallel": true,
"self": 728.6549914157949
}
}
}
}
},
"steps_from_proto": {
"total": 0.09564049914479256,
"count": 110,
"is_parallel": true,
"self": 0.018415361177176237,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07722513796761632,
"count": 440,
"is_parallel": true,
"self": 0.07722513796761632
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 11225.312396995723,
"count": 768561,
"self": 101.76328072044998,
"children": {
"process_trajectory": {
"total": 1761.4239688902162,
"count": 768561,
"self": 1758.192041605711,
"children": {
"RLTrainer._checkpoint": {
"total": 3.2319272845052183,
"count": 22,
"self": 3.2319272845052183
}
}
},
"_update_policy": {
"total": 9362.125147385057,
"count": 539,
"self": 834.941310225986,
"children": {
"TorchPOCAOptimizer.update": {
"total": 8527.183837159071,
"count": 16170,
"self": 8527.183837159071
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.207264959812164e-07,
"count": 1,
"self": 6.207264959812164e-07
},
"TrainerController._save_models": {
"total": 0.11114687705412507,
"count": 1,
"self": 0.0008804742246866226,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11026640282943845,
"count": 1,
"self": 0.11026640282943845
}
}
}
}
}
}
}