{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.500844955444336,
"min": 2.495285749435425,
"max": 3.2957029342651367,
"count": 300
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 53858.1953125,
"min": 17241.162109375,
"max": 138758.0,
"count": 300
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 59.20238095238095,
"min": 49.27272727272727,
"max": 999.0,
"count": 300
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19892.0,
"min": 4648.0,
"max": 28996.0,
"count": 300
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1469.9832606731777,
"min": 1194.8912007580068,
"max": 1470.014527038543,
"count": 296
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 246957.18779309385,
"min": 2391.8049136496525,
"max": 283081.6420429733,
"count": 296
},
"SoccerTwos.Step.mean": {
"value": 2999993.0,
"min": 9514.0,
"max": 2999993.0,
"count": 300
},
"SoccerTwos.Step.sum": {
"value": 2999993.0,
"min": 9514.0,
"max": 2999993.0,
"count": 300
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.017533302307128906,
"min": -0.07208533585071564,
"max": 0.1647343933582306,
"count": 300
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.9455947875976562,
"min": -13.479957580566406,
"max": 19.04033660888672,
"count": 300
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.017748290672898293,
"min": -0.07391548156738281,
"max": 0.16976888477802277,
"count": 300
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.981712818145752,
"min": -13.029105186462402,
"max": 19.420507431030273,
"count": 300
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 300
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 300
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.05464166651169459,
"min": -0.6666666666666666,
"max": 0.4502222202718258,
"count": 300
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -9.179799973964691,
"min": -38.95280021429062,
"max": 60.099600076675415,
"count": 300
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.05464166651169459,
"min": -0.6666666666666666,
"max": 0.4502222202718258,
"count": 300
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -9.179799973964691,
"min": -38.95280021429062,
"max": 60.099600076675415,
"count": 300
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 300
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 300
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01865873868810013,
"min": 0.01196188665247367,
"max": 0.023687563619265953,
"count": 142
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01865873868810013,
"min": 0.01196188665247367,
"max": 0.023687563619265953,
"count": 142
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08710378482937813,
"min": 0.0009272942726965994,
"max": 0.09385835727055868,
"count": 142
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08710378482937813,
"min": 0.0009272942726965994,
"max": 0.09385835727055868,
"count": 142
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0889049564798673,
"min": 0.0009163270597734179,
"max": 0.09573070853948593,
"count": 142
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0889049564798673,
"min": 0.0009163270597734179,
"max": 0.09573070853948593,
"count": 142
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 142
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 142
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 142
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 142
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 142
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 142
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678210057",
"python_version": "3.9.16 (main, Mar 1 2023, 18:30:21) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "D:\\Anaconda\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwosv4 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1678221056"
},
"total": 10998.7740747,
"count": 1,
"self": 1.1318019000009372,
"children": {
"run_training.setup": {
"total": 0.1389936999999999,
"count": 1,
"self": 0.1389936999999999
},
"TrainerController.start_learning": {
"total": 10997.5032791,
"count": 1,
"self": 4.733990999786329,
"children": {
"TrainerController._reset_env": {
"total": 4.633831299997904,
"count": 15,
"self": 4.633831299997904
},
"TrainerController.advance": {
"total": 10987.975278500215,
"count": 199586,
"self": 4.548516800003199,
"children": {
"env_step": {
"total": 3664.5654925001027,
"count": 199586,
"self": 2830.132295600328,
"children": {
"SubprocessEnvManager._take_step": {
"total": 831.2741197999054,
"count": 199586,
"self": 27.513424600118583,
"children": {
"TorchPolicy.evaluate": {
"total": 803.7606951997868,
"count": 382106,
"self": 803.7606951997868
}
}
},
"workers": {
"total": 3.159077099869747,
"count": 199586,
"self": 0.0,
"children": {
"worker_root": {
"total": 10986.203460999928,
"count": 199586,
"is_parallel": true,
"self": 8726.255641200094,
"children": {
"steps_from_proto": {
"total": 0.02805699999707789,
"count": 30,
"is_parallel": true,
"self": 0.005531899995676159,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.022525100001401732,
"count": 120,
"is_parallel": true,
"self": 0.022525100001401732
}
}
},
"UnityEnvironment.step": {
"total": 2259.9197627998365,
"count": 199586,
"is_parallel": true,
"self": 123.73674910074124,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 103.62098329991029,
"count": 199586,
"is_parallel": true,
"self": 103.62098329991029
},
"communicator.exchange": {
"total": 1656.5284987998737,
"count": 199586,
"is_parallel": true,
"self": 1656.5284987998737
},
"steps_from_proto": {
"total": 376.0335315993112,
"count": 399172,
"is_parallel": true,
"self": 73.19729549866753,
"children": {
"_process_rank_one_or_two_observation": {
"total": 302.83623610064365,
"count": 1596688,
"is_parallel": true,
"self": 302.83623610064365
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 7318.861269200109,
"count": 199586,
"self": 35.26619519990254,
"children": {
"process_trajectory": {
"total": 971.4136007002088,
"count": 199586,
"self": 970.3654773002075,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0481234000012591,
"count": 6,
"self": 1.0481234000012591
}
}
},
"_update_policy": {
"total": 6312.181473299997,
"count": 142,
"self": 530.4204363999861,
"children": {
"TorchPOCAOptimizer.update": {
"total": 5781.761036900011,
"count": 4269,
"self": 5781.761036900011
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1000010999850929e-06,
"count": 1,
"self": 1.1000010999850929e-06
},
"TrainerController._save_models": {
"total": 0.16017719999945257,
"count": 1,
"self": 0.007492199998523574,
"children": {
"RLTrainer._checkpoint": {
"total": 0.152685000000929,
"count": 1,
"self": 0.152685000000929
}
}
}
}
}
}
}