{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1950876712799072,
"min": 3.175467014312744,
"max": 3.2957043647766113,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 68707.1640625,
"min": 21706.2109375,
"max": 106147.3515625,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 886.1666666666666,
"min": 470.7,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21268.0,
"min": 14512.0,
"max": 27304.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1196.9729515151796,
"min": 1196.9729515151796,
"max": 1203.187311536932,
"count": 41
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 7181.837709091078,
"min": 2395.8630946990847,
"max": 14414.147977534798,
"count": 41
},
"SoccerTwos.Step.mean": {
"value": 499684.0,
"min": 9668.0,
"max": 499684.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499684.0,
"min": 9668.0,
"max": 499684.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.008015354163944721,
"min": -0.03513012081384659,
"max": 0.02529294416308403,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.09618425369262695,
"min": -0.5551612377166748,
"max": 0.3541012108325958,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.00752301374450326,
"min": -0.03835011646151543,
"max": 0.02526996098458767,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.09027616679668427,
"min": -0.564149022102356,
"max": 0.35376664996147156,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.5833333333333334,
"min": -0.5833333333333334,
"max": 0.23956922957530388,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -7.0,
"min": -7.0,
"max": 3.1143999844789505,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.5833333333333334,
"min": -0.5833333333333334,
"max": 0.23956922957530388,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -7.0,
"min": -7.0,
"max": 3.1143999844789505,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.02072515828185715,
"min": 0.014323229012855638,
"max": 0.023502301599364728,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.02072515828185715,
"min": 0.014323229012855638,
"max": 0.023502301599364728,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0028850516731229923,
"min": 0.0004598738940937134,
"max": 0.005233720395093163,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0028850516731229923,
"min": 0.0004598738940937134,
"max": 0.005233720395093163,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.003104103721367816,
"min": 0.00046241854385395225,
"max": 0.005272268053765098,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.003104103721367816,
"min": 0.00046241854385395225,
"max": 0.005272268053765098,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681637309",
"python_version": "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]",
"command_line_arguments": "/home/maurizio/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos_v2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681641251"
},
"total": 3941.8777668539997,
"count": 1,
"self": 0.43736875699960365,
"children": {
"run_training.setup": {
"total": 0.019101017999673786,
"count": 1,
"self": 0.019101017999673786
},
"TrainerController.start_learning": {
"total": 3941.4212970790004,
"count": 1,
"self": 0.913418852027462,
"children": {
"TrainerController._reset_env": {
"total": 1.9824769349993403,
"count": 3,
"self": 1.9824769349993403
},
"TrainerController.advance": {
"total": 3938.183272012973,
"count": 33009,
"self": 1.143271943898526,
"children": {
"env_step": {
"total": 2798.3119356671064,
"count": 33009,
"self": 2659.1536916359737,
"children": {
"SubprocessEnvManager._take_step": {
"total": 138.5513532521436,
"count": 33009,
"self": 5.8795235891602715,
"children": {
"TorchPolicy.evaluate": {
"total": 132.67182966298333,
"count": 65524,
"self": 132.67182966298333
}
}
},
"workers": {
"total": 0.6068907789890545,
"count": 33009,
"self": 0.0,
"children": {
"worker_root": {
"total": 3938.194346613913,
"count": 33009,
"is_parallel": true,
"self": 1383.4939637639523,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005342321000171069,
"count": 2,
"is_parallel": true,
"self": 0.0017588700006854197,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003583450999485649,
"count": 8,
"is_parallel": true,
"self": 0.003583450999485649
}
}
},
"UnityEnvironment.step": {
"total": 0.11672876400007226,
"count": 1,
"is_parallel": true,
"self": 0.0003051490002690116,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0012630559999706747,
"count": 1,
"is_parallel": true,
"self": 0.0012630559999706747
},
"communicator.exchange": {
"total": 0.11163458900000478,
"count": 1,
"is_parallel": true,
"self": 0.11163458900000478
},
"steps_from_proto": {
"total": 0.003525969999827794,
"count": 2,
"is_parallel": true,
"self": 0.0005598690004262608,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002966100999401533,
"count": 8,
"is_parallel": true,
"self": 0.002966100999401533
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2554.695803235961,
"count": 33008,
"is_parallel": true,
"self": 8.984133617129373,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 51.91529072786125,
"count": 33008,
"is_parallel": true,
"self": 51.91529072786125
},
"communicator.exchange": {
"total": 2403.7172612920676,
"count": 33008,
"is_parallel": true,
"self": 2403.7172612920676
},
"steps_from_proto": {
"total": 90.07911759890294,
"count": 66016,
"is_parallel": true,
"self": 13.626124582844113,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.45299301605883,
"count": 264064,
"is_parallel": true,
"self": 76.45299301605883
}
}
}
}
},
"steps_from_proto": {
"total": 0.0045796139993399265,
"count": 4,
"is_parallel": true,
"self": 0.0006624750021728687,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003917138997167058,
"count": 16,
"is_parallel": true,
"self": 0.003917138997167058
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1138.728064401968,
"count": 33009,
"self": 7.550250501002665,
"children": {
"process_trajectory": {
"total": 143.17477487096812,
"count": 33009,
"self": 142.823888436968,
"children": {
"RLTrainer._checkpoint": {
"total": 0.350886434000131,
"count": 1,
"self": 0.350886434000131
}
}
},
"_update_policy": {
"total": 988.0030390299971,
"count": 23,
"self": 67.80555177600763,
"children": {
"TorchPOCAOptimizer.update": {
"total": 920.1974872539895,
"count": 690,
"self": 920.1974872539895
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.610005923197605e-07,
"count": 1,
"self": 7.610005923197605e-07
},
"TrainerController._save_models": {
"total": 0.3421285180002087,
"count": 1,
"self": 0.01853638000011415,
"children": {
"RLTrainer._checkpoint": {
"total": 0.32359213800009456,
"count": 1,
"self": 0.32359213800009456
}
}
}
}
}
}
}