{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5859648585319519,
"min": 0.5837823152542114,
"max": 2.8703906536102295,
"count": 60
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5955.7470703125,
"min": 5625.068359375,
"max": 29395.669921875,
"count": 60
},
"SnowballTarget.Step.mean": {
"value": 599936.0,
"min": 9952.0,
"max": 599936.0,
"count": 60
},
"SnowballTarget.Step.sum": {
"value": 599936.0,
"min": 9952.0,
"max": 599936.0,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.774064064025879,
"min": 0.3227589428424835,
"max": 13.937908172607422,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2672.16845703125,
"min": 62.615234375,
"max": 2853.169921875,
"count": 60
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 60
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 60
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06854933200634596,
"min": 0.06311415631217625,
"max": 0.07902527435291239,
"count": 60
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2741973280253838,
"min": 0.252456625248705,
"max": 0.39512637176456195,
"count": 60
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17284183248000984,
"min": 0.10839151552029172,
"max": 0.2787145684454955,
"count": 60
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6913673299200394,
"min": 0.4335660620811669,
"max": 1.3468804704208002,
"count": 60
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.4940991686666686e-06,
"min": 2.4940991686666686e-06,
"max": 0.00029729400090199997,
"count": 60
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 9.976396674666675e-06,
"min": 9.976396674666675e-06,
"max": 0.00146172001276,
"count": 60
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10083133333333334,
"min": 0.10083133333333334,
"max": 0.19909800000000002,
"count": 60
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40332533333333337,
"min": 0.40332533333333337,
"max": 0.9872400000000001,
"count": 60
},
"SnowballTarget.Policy.Beta.mean": {
"value": 5.148353333333336e-05,
"min": 5.148353333333336e-05,
"max": 0.0049549902,
"count": 60
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00020593413333333345,
"min": 0.00020593413333333345,
"max": 0.024363276,
"count": 60
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.181818181818183,
"min": 3.0,
"max": 27.822222222222223,
"count": 60
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1196.0,
"min": 132.0,
"max": 1523.0,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.181818181818183,
"min": 3.0,
"max": 27.822222222222223,
"count": 60
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1196.0,
"min": 132.0,
"max": 1523.0,
"count": 60
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 60
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 60
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673720850",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673722126"
},
"total": 1275.7035026610001,
"count": 1,
"self": 0.42305921100023625,
"children": {
"run_training.setup": {
"total": 0.10846687699995528,
"count": 1,
"self": 0.10846687699995528
},
"TrainerController.start_learning": {
"total": 1275.171976573,
"count": 1,
"self": 1.5294065820853575,
"children": {
"TrainerController._reset_env": {
"total": 6.302745676000086,
"count": 1,
"self": 6.302745676000086
},
"TrainerController.advance": {
"total": 1267.1959304349143,
"count": 54601,
"self": 0.755030854920733,
"children": {
"env_step": {
"total": 1266.4408995799936,
"count": 54601,
"self": 835.7471162530023,
"children": {
"SubprocessEnvManager._take_step": {
"total": 429.91803064795704,
"count": 54601,
"self": 4.0987594230132345,
"children": {
"TorchPolicy.evaluate": {
"total": 425.8192712249438,
"count": 54601,
"self": 90.54026988792566,
"children": {
"TorchPolicy.sample_actions": {
"total": 335.27900133701814,
"count": 54601,
"self": 335.27900133701814
}
}
}
}
},
"workers": {
"total": 0.7757526790342126,
"count": 54601,
"self": 0.0,
"children": {
"worker_root": {
"total": 1271.497405151993,
"count": 54601,
"is_parallel": true,
"self": 620.6493727950406,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00205601800007571,
"count": 1,
"is_parallel": true,
"self": 0.0007059279996610712,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013500900004146388,
"count": 10,
"is_parallel": true,
"self": 0.0013500900004146388
}
}
},
"UnityEnvironment.step": {
"total": 0.0334840739997162,
"count": 1,
"is_parallel": true,
"self": 0.00036137899951427244,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003125020002698875,
"count": 1,
"is_parallel": true,
"self": 0.0003125020002698875
},
"communicator.exchange": {
"total": 0.03093966400001591,
"count": 1,
"is_parallel": true,
"self": 0.03093966400001591
},
"steps_from_proto": {
"total": 0.0018705289999161323,
"count": 1,
"is_parallel": true,
"self": 0.00043810099941765657,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014324280004984757,
"count": 10,
"is_parallel": true,
"self": 0.0014324280004984757
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 650.8480323569524,
"count": 54600,
"is_parallel": true,
"self": 24.55480428998044,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.54018202491443,
"count": 54600,
"is_parallel": true,
"self": 14.54018202491443
},
"communicator.exchange": {
"total": 514.5446681630292,
"count": 54600,
"is_parallel": true,
"self": 514.5446681630292
},
"steps_from_proto": {
"total": 97.20837787902838,
"count": 54600,
"is_parallel": true,
"self": 19.23463584680485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.97374203222353,
"count": 546000,
"is_parallel": true,
"self": 77.97374203222353
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.675600030168425e-05,
"count": 1,
"self": 4.675600030168425e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1258.865249197131,
"count": 1015488,
"is_parallel": true,
"self": 25.81629298707503,
"children": {
"process_trajectory": {
"total": 713.3570200330587,
"count": 1015488,
"is_parallel": true,
"self": 710.5809805580584,
"children": {
"RLTrainer._checkpoint": {
"total": 2.776039475000289,
"count": 12,
"is_parallel": true,
"self": 2.776039475000289
}
}
},
"_update_policy": {
"total": 519.6919361769974,
"count": 272,
"is_parallel": true,
"self": 130.3523132490027,
"children": {
"TorchPPOOptimizer.update": {
"total": 389.3396229279947,
"count": 13869,
"is_parallel": true,
"self": 389.3396229279947
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1438471239998762,
"count": 1,
"self": 0.0033045899999706307,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14054253399990557,
"count": 1,
"self": 0.14054253399990557
}
}
}
}
}
}
}