{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8848768472671509,
"min": 1.8757039308547974,
"max": 3.219226598739624,
"count": 563
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36008.6875,
"min": 2922.93212890625,
"max": 112725.4453125,
"count": 563
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 59.7037037037037,
"min": 41.04201680672269,
"max": 999.0,
"count": 563
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19344.0,
"min": 3996.0,
"max": 31968.0,
"count": 563
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 565
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 565
},
"SoccerTwos.Step.mean": {
"value": 6139928.0,
"min": 509710.0,
"max": 6139928.0,
"count": 564
},
"SoccerTwos.Step.sum": {
"value": 6139928.0,
"min": 509710.0,
"max": 6139928.0,
"count": 564
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.01574029214680195,
"min": -0.1280127763748169,
"max": 0.20034894347190857,
"count": 564
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.549927234649658,
"min": -18.177814483642578,
"max": 34.25967025756836,
"count": 564
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.023163100704550743,
"min": -0.1331113576889038,
"max": 0.2047886699438095,
"count": 564
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.752422332763672,
"min": -18.901813507080078,
"max": 36.43235778808594,
"count": 564
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 564
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 564
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.04818271707605432,
"min": -0.6666666666666666,
"max": 0.5627870929818,
"count": 564
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -7.805600166320801,
"min": -47.43999981880188,
"max": 82.8496001958847,
"count": 564
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.04818271707605432,
"min": -0.6666666666666666,
"max": 0.5627870929818,
"count": 564
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -7.805600166320801,
"min": -47.43999981880188,
"max": 82.8496001958847,
"count": 564
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1612.051858803779,
"min": 1204.7975114034084,
"max": 1613.035171905125,
"count": 516
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 261152.40112621218,
"min": 2409.595022806817,
"max": 359929.7603653413,
"count": 516
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014765378083878507,
"min": 0.010268934049721186,
"max": 0.023577560948130363,
"count": 267
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014765378083878507,
"min": 0.010268934049721186,
"max": 0.023577560948130363,
"count": 267
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10163015499711037,
"min": 5.230705994563323e-07,
"max": 0.11527040724953015,
"count": 267
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10163015499711037,
"min": 5.230705994563323e-07,
"max": 0.11527040724953015,
"count": 267
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10291009470820427,
"min": 6.136595336177682e-07,
"max": 0.11760621542731921,
"count": 267
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10291009470820427,
"min": 6.136595336177682e-07,
"max": 0.11760621542731921,
"count": 267
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 267
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 267
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 267
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999993,
"max": 0.20000000000000007,
"count": 267
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 267
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 267
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684594638",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "F:\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1684617245"
},
"total": 22607.6230215,
"count": 1,
"self": 0.011424199998145923,
"children": {
"run_training.setup": {
"total": 0.10963820000000002,
"count": 1,
"self": 0.10963820000000002
},
"TrainerController.start_learning": {
"total": 22607.501959100002,
"count": 1,
"self": 11.772981600068306,
"children": {
"TrainerController._reset_env": {
"total": 4.750529199993407,
"count": 30,
"self": 4.750529199993407
},
"TrainerController.advance": {
"total": 22590.792137599936,
"count": 379103,
"self": 13.231401799719606,
"children": {
"env_step": {
"total": 9890.113025399467,
"count": 379103,
"self": 7852.966321599334,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2029.1959551004752,
"count": 379103,
"self": 73.52032779910337,
"children": {
"TorchPolicy.evaluate": {
"total": 1955.6756273013718,
"count": 716194,
"self": 1955.6756273013718
}
}
},
"workers": {
"total": 7.950748699657829,
"count": 379103,
"self": 0.0,
"children": {
"worker_root": {
"total": 22567.094467499282,
"count": 379103,
"is_parallel": true,
"self": 16198.500223798728,
"children": {
"steps_from_proto": {
"total": 0.07193569999521987,
"count": 60,
"is_parallel": true,
"self": 0.014546299972570509,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05738940002264936,
"count": 240,
"is_parallel": true,
"self": 0.05738940002264936
}
}
},
"UnityEnvironment.step": {
"total": 6368.522308000558,
"count": 379103,
"is_parallel": true,
"self": 290.1131404017842,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 229.16394640028744,
"count": 379103,
"is_parallel": true,
"self": 229.16394640028744
},
"communicator.exchange": {
"total": 4896.385889898617,
"count": 379103,
"is_parallel": true,
"self": 4896.385889898617
},
"steps_from_proto": {
"total": 952.8593312998693,
"count": 758206,
"is_parallel": true,
"self": 194.62381970201648,
"children": {
"_process_rank_one_or_two_observation": {
"total": 758.2355115978528,
"count": 3032824,
"is_parallel": true,
"self": 758.2355115978528
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 12687.44771040075,
"count": 379103,
"self": 81.34807190068932,
"children": {
"process_trajectory": {
"total": 1985.3780764000421,
"count": 379103,
"self": 1982.4614478000456,
"children": {
"RLTrainer._checkpoint": {
"total": 2.916628599996592,
"count": 12,
"self": 2.916628599996592
}
}
},
"_update_policy": {
"total": 10620.721562100018,
"count": 268,
"self": 1123.9491521999698,
"children": {
"TorchPOCAOptimizer.update": {
"total": 9496.772409900048,
"count": 8071,
"self": 9496.772409900048
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.6000016077887267e-06,
"count": 1,
"self": 2.6000016077887267e-06
},
"TrainerController._save_models": {
"total": 0.18630810000104248,
"count": 1,
"self": 0.0024245000022347085,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18388359999880777,
"count": 1,
"self": 0.18388359999880777
}
}
}
}
}
}
}