{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5594910383224487,
"min": 1.5007145404815674,
"max": 3.2958080768585205,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 30441.265625,
"min": 19399.912109375,
"max": 128602.0625,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 50.91489361702128,
"min": 39.16528925619835,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19144.0,
"min": 13976.0,
"max": 28560.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1543.3786780823398,
"min": 993.6767511934177,
"max": 1570.0615274071952,
"count": 976
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 290155.1914794799,
"min": 1991.059276822115,
"max": 377381.6375228315,
"count": 976
},
"SoccerTwos.Step.mean": {
"value": 9999968.0,
"min": 9296.0,
"max": 9999968.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999968.0,
"min": 9296.0,
"max": 9999968.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.020725328475236893,
"min": -0.14163443446159363,
"max": 0.33447977900505066,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 3.9170870780944824,
"min": -25.069293975830078,
"max": 45.955780029296875,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.027763523161411285,
"min": -0.14759673178195953,
"max": 0.3326408863067627,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 5.247305870056152,
"min": -26.124622344970703,
"max": 47.12504959106445,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.04137354553061188,
"min": -0.5,
"max": 0.7079642289053134,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 7.8196001052856445,
"min": -68.77140003442764,
"max": 88.84599977731705,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.04137354553061188,
"min": -0.5,
"max": 0.7079642289053134,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 7.8196001052856445,
"min": -68.77140003442764,
"max": 88.84599977731705,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015625101576248804,
"min": 0.010895159827972142,
"max": 0.027467859897296876,
"count": 482
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015625101576248804,
"min": 0.010895159827972142,
"max": 0.027467859897296876,
"count": 482
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1730583036939303,
"min": 4.477435936678376e-06,
"max": 0.17462901572386424,
"count": 482
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1730583036939303,
"min": 4.477435936678376e-06,
"max": 0.17462901572386424,
"count": 482
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.17562171320120493,
"min": 4.54882605633126e-06,
"max": 0.17695415963729222,
"count": 482
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.17562171320120493,
"min": 4.54882605633126e-06,
"max": 0.17695415963729222,
"count": 482
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 482
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 482
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 482
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 482
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 482
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 482
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700226012",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/slsuser/anaconda3/envs/deeprl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos-v1 --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu121",
"numpy_version": "1.21.2",
"end_time_seconds": "1700236614"
},
"total": 10601.792952243006,
"count": 1,
"self": 0.2177073829807341,
"children": {
"run_training.setup": {
"total": 0.012064563110470772,
"count": 1,
"self": 0.012064563110470772
},
"TrainerController.start_learning": {
"total": 10601.563180296915,
"count": 1,
"self": 6.469035892281681,
"children": {
"TrainerController._reset_env": {
"total": 1.770947635639459,
"count": 50,
"self": 1.770947635639459
},
"TrainerController.advance": {
"total": 10593.220908808056,
"count": 686281,
"self": 6.679624347947538,
"children": {
"env_step": {
"total": 8094.933057425311,
"count": 686281,
"self": 6087.868127197726,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2002.8992128658574,
"count": 686281,
"self": 52.26499908720143,
"children": {
"TorchPolicy.evaluate": {
"total": 1950.634213778656,
"count": 1261818,
"self": 1950.634213778656
}
}
},
"workers": {
"total": 4.165717361727729,
"count": 686281,
"self": 0.0,
"children": {
"worker_root": {
"total": 10591.178878826788,
"count": 686281,
"is_parallel": true,
"self": 5369.61120087374,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016926480457186699,
"count": 2,
"is_parallel": true,
"self": 0.00038940715603530407,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013032408896833658,
"count": 8,
"is_parallel": true,
"self": 0.0013032408896833658
}
}
},
"UnityEnvironment.step": {
"total": 0.025538987945765257,
"count": 1,
"is_parallel": true,
"self": 0.0005901888944208622,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00033575715497136116,
"count": 1,
"is_parallel": true,
"self": 0.00033575715497136116
},
"communicator.exchange": {
"total": 0.022857312811538577,
"count": 1,
"is_parallel": true,
"self": 0.022857312811538577
},
"steps_from_proto": {
"total": 0.0017557290848344564,
"count": 2,
"is_parallel": true,
"self": 0.00033177854493260384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014239505399018526,
"count": 8,
"is_parallel": true,
"self": 0.0014239505399018526
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5221.466110505164,
"count": 686280,
"is_parallel": true,
"self": 263.89104490960017,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 178.98514333437197,
"count": 686280,
"is_parallel": true,
"self": 178.98514333437197
},
"communicator.exchange": {
"total": 3965.48220455274,
"count": 686280,
"is_parallel": true,
"self": 3965.48220455274
},
"steps_from_proto": {
"total": 813.1077177084517,
"count": 1372560,
"is_parallel": true,
"self": 146.47998522780836,
"children": {
"_process_rank_one_or_two_observation": {
"total": 666.6277324806433,
"count": 5490240,
"is_parallel": true,
"self": 666.6277324806433
}
}
}
}
},
"steps_from_proto": {
"total": 0.10156744788400829,
"count": 98,
"is_parallel": true,
"self": 0.018083795672282577,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08348365221172571,
"count": 392,
"is_parallel": true,
"self": 0.08348365221172571
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2491.608227034798,
"count": 686281,
"self": 58.458906654966995,
"children": {
"process_trajectory": {
"total": 986.5126064775977,
"count": 686281,
"self": 984.3079594504088,
"children": {
"RLTrainer._checkpoint": {
"total": 2.204647027188912,
"count": 20,
"self": 2.204647027188912
}
}
},
"_update_policy": {
"total": 1446.6367139022332,
"count": 482,
"self": 942.7734987116419,
"children": {
"TorchPOCAOptimizer.update": {
"total": 503.8632151905913,
"count": 14460,
"self": 503.8632151905913
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.709007382392883e-07,
"count": 1,
"self": 5.709007382392883e-07
},
"TrainerController._save_models": {
"total": 0.10228739003650844,
"count": 1,
"self": 0.0012010070495307446,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1010863829869777,
"count": 1,
"self": 0.1010863829869777
}
}
}
}
}
}
}