{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6863433122634888,
"min": 1.610230803489685,
"max": 3.2958340644836426,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34482.34765625,
"min": 22927.703125,
"max": 126578.2578125,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 73.46268656716418,
"min": 41.82608695652174,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19688.0,
"min": 14580.0,
"max": 25648.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1537.4496764329986,
"min": 1199.9202472092547,
"max": 1553.2350253986842,
"count": 964
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 206018.2566420218,
"min": 2403.9369507707456,
"max": 350631.704432548,
"count": 964
},
"SoccerTwos.Step.mean": {
"value": 9999966.0,
"min": 9842.0,
"max": 9999966.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999966.0,
"min": 9842.0,
"max": 9999966.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.08222750574350357,
"min": -0.12791788578033447,
"max": 0.14219766855239868,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -11.018486022949219,
"min": -24.688152313232422,
"max": 24.691482543945312,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.08165950328111649,
"min": -0.13370053470134735,
"max": 0.14756102859973907,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -10.942373275756836,
"min": -25.804203033447266,
"max": 27.317825317382812,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.2531432834134173,
"min": -0.5710439013271797,
"max": 0.3946000010999915,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -33.92119997739792,
"min": -55.90700018405914,
"max": 62.56120002269745,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.2531432834134173,
"min": -0.5710439013271797,
"max": 0.3946000010999915,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -33.92119997739792,
"min": -55.90700018405914,
"max": 62.56120002269745,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017500149679399328,
"min": 0.011435189948533661,
"max": 0.022689260540064423,
"count": 480
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017500149679399328,
"min": 0.011435189948533661,
"max": 0.022689260540064423,
"count": 480
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.18740496307611465,
"min": 1.7955275467329556e-06,
"max": 0.24405075132846832,
"count": 480
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.18740496307611465,
"min": 1.7955275467329556e-06,
"max": 0.24405075132846832,
"count": 480
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.1905432152748108,
"min": 1.7955275967551643e-06,
"max": 0.25093470692634584,
"count": 480
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.1905432152748108,
"min": 1.7955275967551643e-06,
"max": 0.25093470692634584,
"count": 480
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.00010000000000000002,
"min": 0.00010000000000000002,
"max": 0.00010000000000000002,
"count": 480
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.00010000000000000002,
"min": 0.00010000000000000002,
"max": 0.00010000000000000002,
"count": 480
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.19999999999999996,
"min": 0.19999999999999996,
"max": 0.19999999999999996,
"count": 480
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.19999999999999996,
"min": 0.19999999999999996,
"max": 0.19999999999999996,
"count": 480
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 480
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 480
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690394239",
"python_version": "3.9.17 (main, Jul 5 2023, 20:41:20) \n[GCC 11.2.0]",
"command_line_arguments": "/media/tales/Arquivos/programas/anaconda3/envs/deep_rl_course/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690409562"
},
"total": 15323.025925444002,
"count": 1,
"self": 0.2687284430139698,
"children": {
"run_training.setup": {
"total": 0.007668242993531749,
"count": 1,
"self": 0.007668242993531749
},
"TrainerController.start_learning": {
"total": 15322.749528757995,
"count": 1,
"self": 12.110927096742671,
"children": {
"TrainerController._reset_env": {
"total": 4.205071338030393,
"count": 50,
"self": 4.205071338030393
},
"TrainerController.advance": {
"total": 15305.790938593214,
"count": 681596,
"self": 11.946660688365228,
"children": {
"env_step": {
"total": 10454.168608721156,
"count": 681596,
"self": 7913.05739205188,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2533.9507824896864,
"count": 681596,
"self": 60.26924840272113,
"children": {
"TorchPolicy.evaluate": {
"total": 2473.6815340869653,
"count": 1263402,
"self": 2473.6815340869653
}
}
},
"workers": {
"total": 7.160434179590084,
"count": 681596,
"self": 0.0,
"children": {
"worker_root": {
"total": 15301.718218787122,
"count": 681596,
"is_parallel": true,
"self": 8654.149133410567,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018684110109461471,
"count": 2,
"is_parallel": true,
"self": 0.0005044080317020416,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013640029792441055,
"count": 8,
"is_parallel": true,
"self": 0.0013640029792441055
}
}
},
"UnityEnvironment.step": {
"total": 0.02145323299919255,
"count": 1,
"is_parallel": true,
"self": 0.00040478500886820257,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000322125997627154,
"count": 1,
"is_parallel": true,
"self": 0.000322125997627154
},
"communicator.exchange": {
"total": 0.019366559994523413,
"count": 1,
"is_parallel": true,
"self": 0.019366559994523413
},
"steps_from_proto": {
"total": 0.0013597619981737807,
"count": 2,
"is_parallel": true,
"self": 0.0002718639589147642,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010878980392590165,
"count": 8,
"is_parallel": true,
"self": 0.0010878980392590165
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 6647.499781142571,
"count": 681595,
"is_parallel": true,
"self": 322.82868523942307,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 190.95615946760518,
"count": 681595,
"is_parallel": true,
"self": 190.95615946760518
},
"communicator.exchange": {
"total": 5196.379570267993,
"count": 681595,
"is_parallel": true,
"self": 5196.379570267993
},
"steps_from_proto": {
"total": 937.3353661675501,
"count": 1363190,
"is_parallel": true,
"self": 183.50248364861181,
"children": {
"_process_rank_one_or_two_observation": {
"total": 753.8328825189383,
"count": 5452760,
"is_parallel": true,
"self": 753.8328825189383
}
}
}
}
},
"steps_from_proto": {
"total": 0.06930423398443963,
"count": 98,
"is_parallel": true,
"self": 0.013548166796681471,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05575606718775816,
"count": 392,
"is_parallel": true,
"self": 0.05575606718775816
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4839.675669183693,
"count": 681596,
"self": 79.58560234920878,
"children": {
"process_trajectory": {
"total": 1241.1745990144263,
"count": 681596,
"self": 1230.5183461274573,
"children": {
"RLTrainer._checkpoint": {
"total": 10.656252886969014,
"count": 20,
"self": 10.656252886969014
}
}
},
"_update_policy": {
"total": 3518.9154678200575,
"count": 480,
"self": 1869.5161502189585,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1649.399317601099,
"count": 24000,
"self": 1649.399317601099
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.430054781958461e-07,
"count": 1,
"self": 6.430054781958461e-07
},
"TrainerController._save_models": {
"total": 0.6425910870020743,
"count": 1,
"self": 0.0027272389997961,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6398638480022782,
"count": 1,
"self": 0.6398638480022782
}
}
}
}
}
}
}