{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9905679821968079,
"min": 0.9905679821968079,
"max": 2.870112419128418,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9479.7353515625,
"min": 9479.7353515625,
"max": 29392.8203125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.542269706726074,
"min": 0.436093270778656,
"max": 12.641898155212402,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2445.74267578125,
"min": 84.60209655761719,
"max": 2578.947265625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06483898489372508,
"min": 0.06352636912976653,
"max": 0.07632039742772986,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25935593957490033,
"min": 0.2541054765190661,
"max": 0.37813210600242936,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21808049170409932,
"min": 0.1486261908556152,
"max": 0.2853587735517352,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8723219668163973,
"min": 0.5945047634224608,
"max": 1.426793867758676,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.25,
"min": 3.659090909090909,
"max": 25.136363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1067.0,
"min": 161.0,
"max": 1371.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.25,
"min": 3.659090909090909,
"max": 25.136363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1067.0,
"min": 161.0,
"max": 1371.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695394432",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695394901"
},
"total": 468.565025098,
"count": 1,
"self": 0.4315352239996173,
"children": {
"run_training.setup": {
"total": 0.04176162900012059,
"count": 1,
"self": 0.04176162900012059
},
"TrainerController.start_learning": {
"total": 468.09172824500024,
"count": 1,
"self": 0.514204477987505,
"children": {
"TrainerController._reset_env": {
"total": 5.107928096000023,
"count": 1,
"self": 5.107928096000023
},
"TrainerController.advance": {
"total": 462.3062296520127,
"count": 18204,
"self": 0.259350869028367,
"children": {
"env_step": {
"total": 462.0468787829843,
"count": 18204,
"self": 335.5412605870131,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.23264098198888,
"count": 18204,
"self": 1.7489456039616016,
"children": {
"TorchPolicy.evaluate": {
"total": 124.48369537802728,
"count": 18204,
"self": 124.48369537802728
}
}
},
"workers": {
"total": 0.2729772139823581,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 466.533233262963,
"count": 18204,
"is_parallel": true,
"self": 221.27764433995458,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005599847999974372,
"count": 1,
"is_parallel": true,
"self": 0.003952773999799319,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016470740001750528,
"count": 10,
"is_parallel": true,
"self": 0.0016470740001750528
}
}
},
"UnityEnvironment.step": {
"total": 0.07310378800002582,
"count": 1,
"is_parallel": true,
"self": 0.0006380299998909322,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004580760000862938,
"count": 1,
"is_parallel": true,
"self": 0.0004580760000862938
},
"communicator.exchange": {
"total": 0.0655762330000016,
"count": 1,
"is_parallel": true,
"self": 0.0655762330000016
},
"steps_from_proto": {
"total": 0.006431449000046996,
"count": 1,
"is_parallel": true,
"self": 0.0004294929995012353,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006001956000545761,
"count": 10,
"is_parallel": true,
"self": 0.006001956000545761
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 245.25558892300842,
"count": 18203,
"is_parallel": true,
"self": 10.398779897999475,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.246553642999743,
"count": 18203,
"is_parallel": true,
"self": 5.246553642999743
},
"communicator.exchange": {
"total": 193.51584178901112,
"count": 18203,
"is_parallel": true,
"self": 193.51584178901112
},
"steps_from_proto": {
"total": 36.09441359299808,
"count": 18203,
"is_parallel": true,
"self": 6.484813412918129,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.60960018007995,
"count": 182030,
"is_parallel": true,
"self": 29.60960018007995
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00017802000002120622,
"count": 1,
"self": 0.00017802000002120622,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 458.7752014971038,
"count": 437497,
"is_parallel": true,
"self": 9.64992278710929,
"children": {
"process_trajectory": {
"total": 250.02878089399564,
"count": 437497,
"is_parallel": true,
"self": 249.2222624149956,
"children": {
"RLTrainer._checkpoint": {
"total": 0.806518479000033,
"count": 4,
"is_parallel": true,
"self": 0.806518479000033
}
}
},
"_update_policy": {
"total": 199.09649781599887,
"count": 90,
"is_parallel": true,
"self": 79.48694423101233,
"children": {
"TorchPPOOptimizer.update": {
"total": 119.60955358498654,
"count": 4584,
"is_parallel": true,
"self": 119.60955358498654
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.16318799900000158,
"count": 1,
"self": 0.0008954439999797614,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16229255500002182,
"count": 1,
"self": 0.16229255500002182
}
}
}
}
}
}
}