{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9893794655799866,
"min": 0.980243444442749,
"max": 2.8652429580688477,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9435.7119140625,
"min": 9435.7119140625,
"max": 29405.98828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.947700500488281,
"min": 0.2987855076789856,
"max": 12.947700500488281,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2524.801513671875,
"min": 57.96438980102539,
"max": 2630.80615234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06674956334899987,
"min": 0.06504584097732304,
"max": 0.07572208442397645,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2669982533959995,
"min": 0.2610530887693897,
"max": 0.363526012319341,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19845225954172657,
"min": 0.1130397945058112,
"max": 0.3238171533334489,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7938090381669063,
"min": 0.4521591780232448,
"max": 1.4884780705632532,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.0,
"min": 3.090909090909091,
"max": 25.581818181818182,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1100.0,
"min": 136.0,
"max": 1407.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.0,
"min": 3.090909090909091,
"max": 25.581818181818182,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1100.0,
"min": 136.0,
"max": 1407.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684571326",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684571912"
},
"total": 585.8853539450001,
"count": 1,
"self": 0.9225419660000398,
"children": {
"run_training.setup": {
"total": 0.05010019700000612,
"count": 1,
"self": 0.05010019700000612
},
"TrainerController.start_learning": {
"total": 584.9127117820001,
"count": 1,
"self": 0.8304974340054514,
"children": {
"TrainerController._reset_env": {
"total": 0.9114929539999821,
"count": 1,
"self": 0.9114929539999821
},
"TrainerController.advance": {
"total": 582.9247080719947,
"count": 18213,
"self": 0.40195173698486997,
"children": {
"env_step": {
"total": 582.5227563350098,
"count": 18213,
"self": 476.82408980001094,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.3137127460065,
"count": 18213,
"self": 2.524037138011863,
"children": {
"TorchPolicy.evaluate": {
"total": 102.78967560799464,
"count": 18213,
"self": 102.78967560799464
}
}
},
"workers": {
"total": 0.38495378899239086,
"count": 18213,
"self": 0.0,
"children": {
"worker_root": {
"total": 582.7375678229903,
"count": 18213,
"is_parallel": true,
"self": 252.0952651749974,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0075640219999968394,
"count": 1,
"is_parallel": true,
"self": 0.005632385999888356,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019316360001084831,
"count": 10,
"is_parallel": true,
"self": 0.0019316360001084831
}
}
},
"UnityEnvironment.step": {
"total": 0.04021935299999768,
"count": 1,
"is_parallel": true,
"self": 0.0006764459999430983,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000298360000044795,
"count": 1,
"is_parallel": true,
"self": 0.000298360000044795
},
"communicator.exchange": {
"total": 0.036856268000008185,
"count": 1,
"is_parallel": true,
"self": 0.036856268000008185
},
"steps_from_proto": {
"total": 0.002388279000001603,
"count": 1,
"is_parallel": true,
"self": 0.0004258379999555473,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019624410000460557,
"count": 10,
"is_parallel": true,
"self": 0.0019624410000460557
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 330.6423026479929,
"count": 18212,
"is_parallel": true,
"self": 13.493656320986702,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.357036839001012,
"count": 18212,
"is_parallel": true,
"self": 7.357036839001012
},
"communicator.exchange": {
"total": 264.08301359200095,
"count": 18212,
"is_parallel": true,
"self": 264.08301359200095
},
"steps_from_proto": {
"total": 45.708595896004226,
"count": 18212,
"is_parallel": true,
"self": 9.066692275049888,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.64190362095434,
"count": 182120,
"is_parallel": true,
"self": 36.64190362095434
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00022374099989974638,
"count": 1,
"self": 0.00022374099989974638,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 578.1844241699981,
"count": 563943,
"is_parallel": true,
"self": 12.910836793039493,
"children": {
"process_trajectory": {
"total": 307.5770429859589,
"count": 563943,
"is_parallel": true,
"self": 304.0992559059589,
"children": {
"RLTrainer._checkpoint": {
"total": 3.4777870799999846,
"count": 4,
"is_parallel": true,
"self": 3.4777870799999846
}
}
},
"_update_policy": {
"total": 257.6965443909998,
"count": 90,
"is_parallel": true,
"self": 88.55778995199807,
"children": {
"TorchPPOOptimizer.update": {
"total": 169.13875443900173,
"count": 4584,
"is_parallel": true,
"self": 169.13875443900173
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.24578958100005366,
"count": 1,
"self": 0.0014214220000212663,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2443681590000324,
"count": 1,
"self": 0.2443681590000324
}
}
}
}
}
}
}