Commit de7f2e4 ("First push") by sayby
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6491749882698059,
"min": 0.649020254611969,
"max": 2.8507027626037598,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6669.6240234375,
"min": 6316.84765625,
"max": 29194.046875,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.671067237854004,
"min": 0.3977971076965332,
"max": 13.677200317382812,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2788.897705078125,
"min": 77.17263793945312,
"max": 2793.63525390625,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06707723103484194,
"min": 0.06297465502600115,
"max": 0.07235257923770834,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.33538615517420967,
"min": 0.2518986201040046,
"max": 0.3617628961885417,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1662632614198853,
"min": 0.14398856543839963,
"max": 0.2722596528775552,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8313163070994265,
"min": 0.5759542617535985,
"max": 1.2830930434605654,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.288098237333332e-06,
"min": 5.288098237333332e-06,
"max": 0.0002945880018039999,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.644049118666666e-05,
"min": 2.644049118666666e-05,
"max": 0.00142344002552,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10176266666666664,
"min": 0.10176266666666664,
"max": 0.198196,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5088133333333332,
"min": 0.42025066666666666,
"max": 0.9744800000000001,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.795706666666662e-05,
"min": 9.795706666666662e-05,
"max": 0.0049099804,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004897853333333331,
"min": 0.0004897853333333331,
"max": 0.023726551999999995,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.563636363636363,
"min": 3.9545454545454546,
"max": 26.931818181818183,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1461.0,
"min": 174.0,
"max": 1480.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.563636363636363,
"min": 3.9545454545454546,
"max": 26.931818181818183,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1461.0,
"min": 174.0,
"max": 1480.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673355210",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673355957"
},
"total": 747.695849581,
"count": 1,
"self": 0.44000154199989083,
"children": {
"run_training.setup": {
"total": 0.1101272710000103,
"count": 1,
"self": 0.1101272710000103
},
"TrainerController.start_learning": {
"total": 747.145720768,
"count": 1,
"self": 0.9847370030025786,
"children": {
"TrainerController._reset_env": {
"total": 8.869172969999994,
"count": 1,
"self": 8.869172969999994
},
"TrainerController.advance": {
"total": 737.1307831919975,
"count": 27338,
"self": 0.406442237998931,
"children": {
"env_step": {
"total": 736.7243409539985,
"count": 27338,
"self": 509.3111922929928,
"children": {
"SubprocessEnvManager._take_step": {
"total": 227.00098935599632,
"count": 27338,
"self": 2.2642450419945703,
"children": {
"TorchPolicy.evaluate": {
"total": 224.73674431400175,
"count": 27338,
"self": 49.98210190200672,
"children": {
"TorchPolicy.sample_actions": {
"total": 174.75464241199504,
"count": 27338,
"self": 174.75464241199504
}
}
}
}
},
"workers": {
"total": 0.4121593050093679,
"count": 27338,
"self": 0.0,
"children": {
"worker_root": {
"total": 745.0567439719977,
"count": 27338,
"is_parallel": true,
"self": 401.1191881460007,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006116122999998197,
"count": 1,
"is_parallel": true,
"self": 0.0035537379999652785,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025623850000329185,
"count": 10,
"is_parallel": true,
"self": 0.0025623850000329185
}
}
},
"UnityEnvironment.step": {
"total": 0.04229259100000604,
"count": 1,
"is_parallel": true,
"self": 0.0005334609999749773,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002860110000142413,
"count": 1,
"is_parallel": true,
"self": 0.0002860110000142413
},
"communicator.exchange": {
"total": 0.03961933300001874,
"count": 1,
"is_parallel": true,
"self": 0.03961933300001874
},
"steps_from_proto": {
"total": 0.001853785999998081,
"count": 1,
"is_parallel": true,
"self": 0.0004184480000049007,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014353379999931803,
"count": 10,
"is_parallel": true,
"self": 0.0014353379999931803
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 343.937555825997,
"count": 27337,
"is_parallel": true,
"self": 13.13819655396145,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.795633789012584,
"count": 27337,
"is_parallel": true,
"self": 7.795633789012584
},
"communicator.exchange": {
"total": 276.0538007310048,
"count": 27337,
"is_parallel": true,
"self": 276.0538007310048
},
"steps_from_proto": {
"total": 46.94992475201815,
"count": 27337,
"is_parallel": true,
"self": 10.065143597030698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.88478115498745,
"count": 273370,
"is_parallel": true,
"self": 36.88478115498745
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.1712999973242404e-05,
"count": 1,
"self": 4.1712999973242404e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 732.6759581959961,
"count": 511055,
"is_parallel": true,
"self": 13.911364919973494,
"children": {
"process_trajectory": {
"total": 369.12714352802317,
"count": 511055,
"is_parallel": true,
"self": 367.49676167602325,
"children": {
"RLTrainer._checkpoint": {
"total": 1.630381851999914,
"count": 6,
"is_parallel": true,
"self": 1.630381851999914
}
}
},
"_update_policy": {
"total": 349.6374497479994,
"count": 136,
"is_parallel": true,
"self": 112.74147900499935,
"children": {
"TorchPPOOptimizer.update": {
"total": 236.89597074300008,
"count": 11555,
"is_parallel": true,
"self": 236.89597074300008
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1609858900000063,
"count": 1,
"self": 0.0016959619999852293,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15928992800002106,
"count": 1,
"self": 0.15928992800002106
}
}
}
}
}
}
}
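For reference, below is a minimal sketch of how the "gauges" section of this results file could be read back in Python. The path run_logs/timers.json is an assumption (ML-Agents writes this structure into the run's log directory, but the exact location may differ), and the script only iterates over the gauge entries shown above.

import json

# Assumed location of this results file; adjust to wherever the run stored it.
with open("run_logs/timers.json") as f:
    results = json.load(f)

# Each gauge records the latest value plus min/max/count over the run.
for name, gauge in results["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")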