{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.295684576034546,
"min": 3.295684576034546,
"max": 3.295684576034546,
"count": 1
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 105461.90625,
"min": 105461.90625,
"max": 105461.90625,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 971.875,
"min": 971.875,
"max": 971.875,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 31100.0,
"min": 31100.0,
"max": 31100.0,
"count": 1
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1200.7492804441454,
"min": 1200.7492804441454,
"max": 1200.7492804441454,
"count": 1
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2401.4985608882907,
"min": 2401.4985608882907,
"max": 2401.4985608882907,
"count": 1
},
"SoccerTwos.Step.mean": {
"value": 9566.0,
"min": 9566.0,
"max": 9566.0,
"count": 1
},
"SoccerTwos.Step.sum": {
"value": 9566.0,
"min": 9566.0,
"max": 9566.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.007814358919858932,
"min": -0.007814358919858932,
"max": -0.007814358919858932,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.07814358919858932,
"min": -0.07814358919858932,
"max": -0.07814358919858932,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.007726958952844143,
"min": -0.007726958952844143,
"max": -0.007726958952844143,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.07726959139108658,
"min": -0.07726959139108658,
"max": -0.07726959139108658,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.04343999922275543,
"min": 0.04343999922275543,
"max": 0.04343999922275543,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.4343999922275543,
"min": 0.4343999922275543,
"max": 0.4343999922275543,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.04343999922275543,
"min": 0.04343999922275543,
"max": 0.04343999922275543,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.4343999922275543,
"min": 0.4343999922275543,
"max": 0.4343999922275543,
"count": 1
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675834207",
"python_version": "3.8.16 (default, Jan 17 2023, 16:42:09) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/jrnold/.pyenv/versions/mambaforge/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1",
"numpy_version": "1.21.2",
"end_time_seconds": "1675834231"
},
"total": 24.360340010999998,
"count": 1,
"self": 0.39096425700000026,
"children": {
"run_training.setup": {
"total": 0.062021694000000016,
"count": 1,
"self": 0.062021694000000016
},
"TrainerController.start_learning": {
"total": 23.90735406,
"count": 1,
"self": 0.02115698399999033,
"children": {
"TrainerController._reset_env": {
"total": 4.128362879,
"count": 1,
"self": 4.128362879
},
"TrainerController.advance": {
"total": 19.576662858000006,
"count": 1001,
"self": 0.02241945900000175,
"children": {
"env_step": {
"total": 14.936388841999996,
"count": 1001,
"self": 12.088677863999937,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2.8336093740000132,
"count": 1001,
"self": 0.11841361199992662,
"children": {
"TorchPolicy.evaluate": {
"total": 2.7151957620000866,
"count": 2000,
"self": 2.7151957620000866
}
}
},
"workers": {
"total": 0.014101604000046564,
"count": 1001,
"self": 0.0,
"children": {
"worker_root": {
"total": 15.600958393999996,
"count": 1001,
"is_parallel": true,
"self": 5.729921020999935,
"children": {
"steps_from_proto": {
"total": 0.003751954999999363,
"count": 2,
"is_parallel": true,
"self": 0.0006619229999991205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0030900320000002424,
"count": 8,
"is_parallel": true,
"self": 0.0030900320000002424
}
}
},
"UnityEnvironment.step": {
"total": 9.867285418000062,
"count": 1001,
"is_parallel": true,
"self": 0.5009040660000927,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.3315615400000018,
"count": 1001,
"is_parallel": true,
"self": 0.3315615400000018
},
"communicator.exchange": {
"total": 7.466221223999969,
"count": 1001,
"is_parallel": true,
"self": 7.466221223999969
},
"steps_from_proto": {
"total": 1.5685985879999995,
"count": 2002,
"is_parallel": true,
"self": 0.31877567899988346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1.249822909000116,
"count": 8008,
"is_parallel": true,
"self": 1.249822909000116
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4.617854557000007,
"count": 1001,
"self": 0.10161975499998288,
"children": {
"process_trajectory": {
"total": 4.5162348020000245,
"count": 1001,
"self": 4.5162348020000245
}
}
}
}
},
"trainer_threads": {
"total": 1.0560000021087035e-06,
"count": 1,
"self": 1.0560000021087035e-06
},
"TrainerController._save_models": {
"total": 0.18117028300000015,
"count": 1,
"self": 0.0015677130000000261,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17960257000000013,
"count": 1,
"self": 0.17960257000000013
}
}
}
}
}
}
}