poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.0485711097717285,
"min": 2.0096960067749023,
"max": 3.295670509338379,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 40512.54296875,
"min": 22345.341796875,
"max": 117245.546875,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 53.130434782608695,
"min": 46.31132075471698,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19552.0,
"min": 13488.0,
"max": 26280.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1603.4641878651487,
"min": 1183.89241046223,
"max": 1611.9522980967179,
"count": 478
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 295037.41056718735,
"min": 2368.4239366025095,
"max": 331993.47975224745,
"count": 478
},
"SoccerTwos.Step.mean": {
"value": 4999994.0,
"min": 9632.0,
"max": 4999994.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999994.0,
"min": 9632.0,
"max": 4999994.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.018234437331557274,
"min": -0.1353471726179123,
"max": 0.21857284009456635,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.3551363945007324,
"min": -19.21929931640625,
"max": 27.321605682373047,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.01912573166191578,
"min": -0.1435144543647766,
"max": 0.2151324301958084,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.519134521484375,
"min": -20.379053115844727,
"max": 26.89155387878418,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.10963695723077525,
"min": -0.5714285714285714,
"max": 0.5921999999121124,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -20.173200130462646,
"min": -49.97800004482269,
"max": 52.71919992566109,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.10963695723077525,
"min": -0.5714285714285714,
"max": 0.5921999999121124,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -20.173200130462646,
"min": -49.97800004482269,
"max": 52.71919992566109,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015300305435569801,
"min": 0.009983889403035089,
"max": 0.023893511632923036,
"count": 239
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015300305435569801,
"min": 0.009983889403035089,
"max": 0.023893511632923036,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10630333895484606,
"min": 5.166237686656435e-06,
"max": 0.11564212689797083,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10630333895484606,
"min": 5.166237686656435e-06,
"max": 0.11564212689797083,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10747923279802005,
"min": 5.727423391969447e-06,
"max": 0.11835149203737577,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10747923279802005,
"min": 5.727423391969447e-06,
"max": 0.11835149203737577,
"count": 239
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 239
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 239
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723392168",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\oooka\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0",
"numpy_version": "1.26.4",
"end_time_seconds": "1723402079"
},
"total": 9910.934570000041,
"count": 1,
"self": 0.24824730027467012,
"children": {
"run_training.setup": {
"total": 0.13672429998405278,
"count": 1,
"self": 0.13672429998405278
},
"TrainerController.start_learning": {
"total": 9910.549598399783,
"count": 1,
"self": 7.506645122077316,
"children": {
"TrainerController._reset_env": {
"total": 5.8953447008971125,
"count": 25,
"self": 5.8953447008971125
},
"TrainerController.advance": {
"total": 9896.9692063767,
"count": 338399,
"self": 6.461926730582491,
"children": {
"env_step": {
"total": 7330.1945369967725,
"count": 338399,
"self": 4335.045731663471,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2990.569579374045,
"count": 338399,
"self": 43.23830114072189,
"children": {
"TorchPolicy.evaluate": {
"total": 2947.3312782333232,
"count": 633978,
"self": 2947.3312782333232
}
}
},
"workers": {
"total": 4.579225959256291,
"count": 338399,
"self": 0.0,
"children": {
"worker_root": {
"total": 9895.991913326317,
"count": 338399,
"is_parallel": true,
"self": 6460.7014326925855,
"children": {
"steps_from_proto": {
"total": 0.04588100081309676,
"count": 50,
"is_parallel": true,
"self": 0.0076262010261416435,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03825479978695512,
"count": 200,
"is_parallel": true,
"self": 0.03825479978695512
}
}
},
"UnityEnvironment.step": {
"total": 3435.244599632919,
"count": 338399,
"is_parallel": true,
"self": 168.21028075320646,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 141.06665501440875,
"count": 338399,
"is_parallel": true,
"self": 141.06665501440875
},
"communicator.exchange": {
"total": 2533.740279228892,
"count": 338399,
"is_parallel": true,
"self": 2533.740279228892
},
"steps_from_proto": {
"total": 592.2273846364114,
"count": 676798,
"is_parallel": true,
"self": 100.4645481184125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 491.76283651799895,
"count": 2707192,
"is_parallel": true,
"self": 491.76283651799895
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2560.3127426493447,
"count": 338399,
"self": 57.29812220367603,
"children": {
"process_trajectory": {
"total": 1202.7426054454409,
"count": 338399,
"self": 1201.0307818448637,
"children": {
"RLTrainer._checkpoint": {
"total": 1.7118236005771905,
"count": 10,
"self": 1.7118236005771905
}
}
},
"_update_policy": {
"total": 1300.2720150002278,
"count": 239,
"self": 838.4231178008486,
"children": {
"TorchPOCAOptimizer.update": {
"total": 461.8488971993793,
"count": 7173,
"self": 461.8488971993793
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.0012326836586e-07,
"count": 1,
"self": 9.0012326836586e-07
},
"TrainerController._save_models": {
"total": 0.17840129998512566,
"count": 1,
"self": 0.00241199997253716,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1759893000125885,
"count": 1,
"self": 0.1759893000125885
}
}
}
}
}
}
}
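The JSON above is the gauge and timer log that ML-Agents writes at the end of a training run: the "gauges" block holds per-metric summaries (value, min, max, count) and the remainder is a nested wall-clock breakdown of the trainer. The snippet below is a minimal sketch, not part of the original repository, of how one might load and summarize this file with Python's standard json module; the relative path run_logs/timers.json is an assumption taken from the filename above.

import json

# Minimal sketch: load the timers.json shown above and print a short summary.
# Path is assumed; adjust it to wherever the file lives locally.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the last value plus min/max/count over the run.
for name, gauge in timers["gauges"].items():
    print(f'{name}: value={gauge["value"]} '
          f'(min={gauge["min"]}, max={gauge["max"]}, count={gauge["count"]})')

# Total wall-clock time of the run, in seconds (top-level "total" key).
print("total seconds:", timers["total"])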