{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8133866786956787,
"min": 1.8133866786956787,
"max": 3.2957496643066406,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37022.1015625,
"min": 21899.626953125,
"max": 129072.6796875,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 51.677083333333336,
"min": 43.49557522123894,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19844.0,
"min": 14856.0,
"max": 26616.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1583.2591328881092,
"min": 1186.902705196918,
"max": 1593.3667897650848,
"count": 493
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 303985.753514517,
"min": 2373.805410393836,
"max": 339933.3672600968,
"count": 493
},
"SoccerTwos.Step.mean": {
"value": 4999982.0,
"min": 9330.0,
"max": 4999982.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999982.0,
"min": 9330.0,
"max": 4999982.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.03343738615512848,
"min": -0.09077697992324829,
"max": 0.1848892867565155,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 6.386540412902832,
"min": -16.06752586364746,
"max": 41.56588363647461,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.035941898822784424,
"min": -0.08798559755086899,
"max": 0.19318944215774536,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.864902973175049,
"min": -15.80552864074707,
"max": 43.66081237792969,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.017285863142363064,
"min": -0.5991578949125189,
"max": 0.5558163963380407,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -3.301599860191345,
"min": -47.43159997463226,
"max": 77.27679944038391,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.017285863142363064,
"min": -0.5991578949125189,
"max": 0.5558163963380407,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -3.301599860191345,
"min": -47.43159997463226,
"max": 77.27679944038391,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017720330889278556,
"min": 0.011217654222855344,
"max": 0.02328540737895916,
"count": 240
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017720330889278556,
"min": 0.011217654222855344,
"max": 0.02328540737895916,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09902165581782658,
"min": 0.0005704073182035547,
"max": 0.11834300259749095,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09902165581782658,
"min": 0.0005704073182035547,
"max": 0.11834300259749095,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10045446703831355,
"min": 0.0005726657070529958,
"max": 0.12040329898397127,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10045446703831355,
"min": 0.0005726657070529958,
"max": 0.12040329898397127,
"count": 240
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711274750",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Hosein\\anaconda3\\envs\\ml_agents_env\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711284757"
},
"total": 10006.498496100001,
"count": 1,
"self": 0.3042918999999529,
"children": {
"run_training.setup": {
"total": 0.12596920000214595,
"count": 1,
"self": 0.12596920000214595
},
"TrainerController.start_learning": {
"total": 10006.068234999999,
"count": 1,
"self": 6.93693239942877,
"children": {
"TrainerController._reset_env": {
"total": 6.484243800005061,
"count": 25,
"self": 6.484243800005061
},
"TrainerController.advance": {
"total": 9992.459395300571,
"count": 341422,
"self": 6.797262996493373,
"children": {
"env_step": {
"total": 7431.051334502161,
"count": 341422,
"self": 4362.111070500487,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3064.452701001166,
"count": 341422,
"self": 40.4734161001179,
"children": {
"TorchPolicy.evaluate": {
"total": 3023.9792849010482,
"count": 631128,
"self": 3023.9792849010482
}
}
},
"workers": {
"total": 4.487563000508089,
"count": 341422,
"self": 0.0,
"children": {
"worker_root": {
"total": 9992.240323700255,
"count": 341422,
"is_parallel": true,
"self": 6485.705728001758,
"children": {
"steps_from_proto": {
"total": 0.04412820001016371,
"count": 50,
"is_parallel": true,
"self": 0.008624300033261534,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.035503899976902176,
"count": 200,
"is_parallel": true,
"self": 0.035503899976902176
}
}
},
"UnityEnvironment.step": {
"total": 3506.4904674984864,
"count": 341422,
"is_parallel": true,
"self": 167.9950418963017,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 141.58313099854786,
"count": 341422,
"is_parallel": true,
"self": 141.58313099854786
},
"communicator.exchange": {
"total": 2655.4528280009727,
"count": 341422,
"is_parallel": true,
"self": 2655.4528280009727
},
"steps_from_proto": {
"total": 541.4594666026642,
"count": 682844,
"is_parallel": true,
"self": 110.00853120071042,
"children": {
"_process_rank_one_or_two_observation": {
"total": 431.45093540195376,
"count": 2731376,
"is_parallel": true,
"self": 431.45093540195376
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2554.610797801917,
"count": 341422,
"self": 49.9237516022331,
"children": {
"process_trajectory": {
"total": 1329.3054332996908,
"count": 341422,
"self": 1327.3149899997043,
"children": {
"RLTrainer._checkpoint": {
"total": 1.9904432999865094,
"count": 10,
"self": 1.9904432999865094
}
}
},
"_update_policy": {
"total": 1175.3816128999933,
"count": 240,
"self": 624.3163912998789,
"children": {
"TorchPOCAOptimizer.update": {
"total": 551.0652216001145,
"count": 7200,
"self": 551.0652216001145
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0000003385357559e-06,
"count": 1,
"self": 1.0000003385357559e-06
},
"TrainerController._save_models": {
"total": 0.18766249999316642,
"count": 1,
"self": 0.006767299993953202,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18089519999921322,
"count": 1,
"self": 0.18089519999921322
}
}
}
}
}
}
}