{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.4881393909454346,
"min": 1.267083764076233,
"max": 3.295707941055298,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31667.60546875,
"min": 20720.90234375,
"max": 114632.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 74.16417910447761,
"min": 39.61290322580645,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19876.0,
"min": 15132.0,
"max": 25888.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1564.121873635981,
"min": 1187.275000047532,
"max": 1671.1718260434252,
"count": 4982
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 209592.33106722147,
"min": 2375.7091778662934,
"max": 386157.8868361195,
"count": 4982
},
"SoccerTwos.Step.mean": {
"value": 49999878.0,
"min": 9960.0,
"max": 49999878.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999878.0,
"min": 9960.0,
"max": 49999878.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.008406383916735649,
"min": -0.1671251356601715,
"max": 0.1544373631477356,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.1264554262161255,
"min": -28.875722885131836,
"max": 22.500560760498047,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.006617323961108923,
"min": -0.16993848979473114,
"max": 0.15489326417446136,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.8867214322090149,
"min": -28.25513458251953,
"max": 23.072601318359375,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.19355820808837662,
"min": -0.5454545454545454,
"max": 0.4153510210465412,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -25.93679988384247,
"min": -63.07760012149811,
"max": 61.79600018262863,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.19355820808837662,
"min": -0.5454545454545454,
"max": 0.4153510210465412,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -25.93679988384247,
"min": -63.07760012149811,
"max": 61.79600018262863,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015391487810605516,
"min": 0.009520550646751265,
"max": 0.02611013041653981,
"count": 2423
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015391487810605516,
"min": 0.009520550646751265,
"max": 0.02611013041653981,
"count": 2423
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08373884509007136,
"min": 0.00010859680818005775,
"max": 0.12758082672953605,
"count": 2423
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08373884509007136,
"min": 0.00010859680818005775,
"max": 0.12758082672953605,
"count": 2423
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08460065623124441,
"min": 0.00010996900261185752,
"max": 0.12996851677695911,
"count": 2423
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08460065623124441,
"min": 0.00010996900261185752,
"max": 0.12996851677695911,
"count": 2423
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2423
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2423
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2423
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2423
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2423
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2423
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709688772",
"python_version": "3.10.12 (main, Jul 5 2023, 15:34:07) [Clang 14.0.6 ]",
"command_line_arguments": "/usr/local/Caskroom/miniconda/base/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1",
"numpy_version": "1.23.5",
"end_time_seconds": "1709977981"
},
"total": 289199.275530147,
"count": 1,
"self": 0.5294802180142142,
"children": {
"run_training.setup": {
"total": 0.022152472000016132,
"count": 1,
"self": 0.022152472000016132
},
"TrainerController.start_learning": {
"total": 289198.723897457,
"count": 1,
"self": 62.282380895339884,
"children": {
"TrainerController._reset_env": {
"total": 21.383551281908694,
"count": 250,
"self": 21.383551281908694
},
"TrainerController.advance": {
"total": 289114.87745426275,
"count": 3451434,
"self": 57.12020782352192,
"children": {
"env_step": {
"total": 220441.46845890585,
"count": 3451434,
"self": 212659.3925891547,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7742.6899995735475,
"count": 3451434,
"self": 311.0775826106228,
"children": {
"TorchPolicy.evaluate": {
"total": 7431.612416962925,
"count": 6281466,
"self": 7431.612416962925
}
}
},
"workers": {
"total": 39.38587017759187,
"count": 3451434,
"self": 0.0,
"children": {
"worker_root": {
"total": 289093.1810051828,
"count": 3451434,
"is_parallel": true,
"self": 83019.8263213698,
"children": {
"steps_from_proto": {
"total": 0.4584483848675518,
"count": 500,
"is_parallel": true,
"self": 0.09805343066022942,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.36039495420732237,
"count": 2000,
"is_parallel": true,
"self": 0.36039495420732237
}
}
},
"UnityEnvironment.step": {
"total": 206072.89623542814,
"count": 3451434,
"is_parallel": true,
"self": 604.0867641420336,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3522.8695682702487,
"count": 3451434,
"is_parallel": true,
"self": 3522.8695682702487
},
"communicator.exchange": {
"total": 196411.9097094806,
"count": 3451434,
"is_parallel": true,
"self": 196411.9097094806
},
"steps_from_proto": {
"total": 5534.030193535267,
"count": 6902868,
"is_parallel": true,
"self": 1007.7933321512019,
"children": {
"_process_rank_one_or_two_observation": {
"total": 4526.236861384065,
"count": 27611472,
"is_parallel": true,
"self": 4526.236861384065
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 68616.28878753335,
"count": 3451434,
"self": 452.80791006171785,
"children": {
"process_trajectory": {
"total": 8743.687690217801,
"count": 3451434,
"self": 8724.906973570996,
"children": {
"RLTrainer._checkpoint": {
"total": 18.780716646805104,
"count": 100,
"self": 18.780716646805104
}
}
},
"_update_policy": {
"total": 59419.79318725384,
"count": 2423,
"self": 5168.717389132471,
"children": {
"TorchPOCAOptimizer.update": {
"total": 54251.07579812137,
"count": 72690,
"self": 54251.07579812137
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.359776645898819e-07,
"count": 1,
"self": 7.359776645898819e-07
},
"TrainerController._save_models": {
"total": 0.1805102810030803,
"count": 1,
"self": 0.00240195298101753,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17810832802206278,
"count": 1,
"self": 0.17810832802206278
}
}
}
}
}
}
}