poca-SoccerTwos / run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.904728651046753,
"min": 1.8506577014923096,
"max": 3.295701503753662,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37546.01171875,
"min": 14177.96484375,
"max": 130773.28125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 70.0,
"min": 37.49230769230769,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19880.0,
"min": 16212.0,
"max": 23652.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1610.7610266624235,
"min": 1202.3052571940093,
"max": 1611.1401450931248,
"count": 495
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 228728.06578606414,
"min": 2413.5701353887225,
"max": 387799.9827319372,
"count": 495
},
"SoccerTwos.Step.mean": {
"value": 4999998.0,
"min": 9030.0,
"max": 4999998.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999998.0,
"min": 9030.0,
"max": 4999998.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0025095499586313963,
"min": -0.08400537818670273,
"max": 0.17201215028762817,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.35384654998779297,
"min": -16.277118682861328,
"max": 27.767690658569336,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0030053879600018263,
"min": -0.08430910110473633,
"max": 0.17198973894119263,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.42375969886779785,
"min": -15.025320053100586,
"max": 27.629457473754883,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.05678439647593397,
"min": -0.5462799966335297,
"max": 0.44480000314165336,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -8.00659990310669,
"min": -47.8956001996994,
"max": 56.047400057315826,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.05678439647593397,
"min": -0.5462799966335297,
"max": 0.44480000314165336,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -8.00659990310669,
"min": -47.8956001996994,
"max": 56.047400057315826,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018534475660756774,
"min": 0.01142779259001448,
"max": 0.028547672267692784,
"count": 241
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018534475660756774,
"min": 0.01142779259001448,
"max": 0.028547672267692784,
"count": 241
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0974157584210237,
"min": 0.000931472180915686,
"max": 0.1241871953010559,
"count": 241
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0974157584210237,
"min": 0.000931472180915686,
"max": 0.1241871953010559,
"count": 241
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09848379567265511,
"min": 0.0009322931223626559,
"max": 0.12728464975953102,
"count": 241
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09848379567265511,
"min": 0.0009322931223626559,
"max": 0.12728464975953102,
"count": 241
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 241
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 241
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 241
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 241
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 241
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 241
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675705406",
"python_version": "3.9.16 (main, Jan 11 2023, 16:16:36) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\paperspace\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwosV3 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1675719321"
},
"total": 13914.5420648,
"count": 1,
"self": 0.2545884000010119,
"children": {
"run_training.setup": {
"total": 0.09392030000000018,
"count": 1,
"self": 0.09392030000000018
},
"TrainerController.start_learning": {
"total": 13914.1935561,
"count": 1,
"self": 7.957616199984841,
"children": {
"TrainerController._reset_env": {
"total": 4.019166299997285,
"count": 25,
"self": 4.019166299997285
},
"TrainerController.advance": {
"total": 13902.083278300019,
"count": 343552,
"self": 7.558167800010779,
"children": {
"env_step": {
"total": 5666.114875299987,
"count": 343552,
"self": 4606.377788799868,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1055.0909048000776,
"count": 343552,
"self": 41.26710340049283,
"children": {
"TorchPolicy.evaluate": {
"total": 1013.8238013995848,
"count": 629886,
"self": 1013.8238013995848
}
}
},
"workers": {
"total": 4.646181700041403,
"count": 343552,
"self": 0.0,
"children": {
"worker_root": {
"total": 13898.14573649977,
"count": 343552,
"is_parallel": true,
"self": 10121.544544700058,
"children": {
"steps_from_proto": {
"total": 0.03868940000534016,
"count": 50,
"is_parallel": true,
"self": 0.008047100007024799,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03064229999831536,
"count": 200,
"is_parallel": true,
"self": 0.03064229999831536
}
}
},
"UnityEnvironment.step": {
"total": 3776.562502399706,
"count": 343552,
"is_parallel": true,
"self": 158.93165549989635,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 118.51616079997808,
"count": 343552,
"is_parallel": true,
"self": 118.51616079997808
},
"communicator.exchange": {
"total": 3020.8293958996646,
"count": 343552,
"is_parallel": true,
"self": 3020.8293958996646
},
"steps_from_proto": {
"total": 478.28529020016686,
"count": 687104,
"is_parallel": true,
"self": 97.97731250005751,
"children": {
"_process_rank_one_or_two_observation": {
"total": 380.30797770010935,
"count": 2748416,
"is_parallel": true,
"self": 380.30797770010935
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8228.41023520002,
"count": 343552,
"self": 63.70748739979172,
"children": {
"process_trajectory": {
"total": 1252.8230386002326,
"count": 343552,
"self": 1251.5467199002326,
"children": {
"RLTrainer._checkpoint": {
"total": 1.276318700000047,
"count": 10,
"self": 1.276318700000047
}
}
},
"_update_policy": {
"total": 6911.879709199997,
"count": 241,
"self": 803.4124982000103,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6108.4672109999865,
"count": 7230,
"self": 6108.4672109999865
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999998731771484e-07,
"count": 1,
"self": 6.999998731771484e-07
},
"TrainerController._save_models": {
"total": 0.1334945999988122,
"count": 1,
"self": 0.007632799999555573,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12586179999925662,
"count": 1,
"self": 0.12586179999925662
}
}
}
}
}
}
}
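
A quick way to read this timer dump (not part of the original run logs): the sketch below, assuming the file is saved locally as run_logs/timers.json, loads the JSON above and prints how the total wall-clock time splits between environment stepping and trainer updates under TrainerController.advance. The file path and the printed format are illustrative assumptions only.

# Minimal sketch (assumption: the JSON above is saved locally at the path below).
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

total = timers["total"]  # whole-run wall-clock seconds (~13914.5 s in this log)
advance = (
    timers["children"]["TrainerController.start_learning"]
          ["children"]["TrainerController.advance"]
)

print(f"total run time: {total:.1f} s")
# env_step and trainer_advance are the two large children recorded in this log
for name, node in advance["children"].items():
    share = 100.0 * node["total"] / total
    print(f"  {name:<16} {node['total']:>10.1f} s ({share:.1f}% of total)")

With the values recorded here, that works out to roughly 5666 s (about 41%) in env_step and roughly 8228 s (about 59%) in trainer_advance, with most of the trainer time going to TorchPOCAOptimizer.update (about 6108 s), so the run spent more time on POCA policy updates than on stepping the Unity environment.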