{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9087465405464172,
"min": 0.9087465405464172,
"max": 2.871007204055786,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8676.7119140625,
"min": 8676.7119140625,
"max": 29401.984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.07088565826416,
"min": 0.24461351335048676,
"max": 13.07088565826416,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2548.82275390625,
"min": 47.455020904541016,
"max": 2663.09228515625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0653268201935776,
"min": 0.06297014879133962,
"max": 0.07390237145134997,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2613072807743104,
"min": 0.2518805951653585,
"max": 0.35812305113348186,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20199263044724278,
"min": 0.1047705379371787,
"max": 0.2735509200423371,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8079705217889711,
"min": 0.4190821517487148,
"max": 1.3677546002116856,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.795454545454547,
"min": 2.8181818181818183,
"max": 25.795454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1135.0,
"min": 124.0,
"max": 1395.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.795454545454547,
"min": 2.8181818181818183,
"max": 25.795454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1135.0,
"min": 124.0,
"max": 1395.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689170655",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689171152"
},
"total": 496.533918441,
"count": 1,
"self": 0.4348712520000504,
"children": {
"run_training.setup": {
"total": 0.04576658899998165,
"count": 1,
"self": 0.04576658899998165
},
"TrainerController.start_learning": {
"total": 496.0532806,
"count": 1,
"self": 0.6058798240009651,
"children": {
"TrainerController._reset_env": {
"total": 4.751155872000027,
"count": 1,
"self": 4.751155872000027
},
"TrainerController.advance": {
"total": 490.55226977399906,
"count": 18201,
"self": 0.31110196700672077,
"children": {
"env_step": {
"total": 490.24116780699234,
"count": 18201,
"self": 356.44214756699756,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.4948495019937,
"count": 18201,
"self": 1.9923134769953776,
"children": {
"TorchPolicy.evaluate": {
"total": 131.50253602499834,
"count": 18201,
"self": 131.50253602499834
}
}
},
"workers": {
"total": 0.3041707380010621,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 494.3585089249972,
"count": 18201,
"is_parallel": true,
"self": 230.46396713199562,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005779144999962682,
"count": 1,
"is_parallel": true,
"self": 0.003982582999924489,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017965620000381932,
"count": 10,
"is_parallel": true,
"self": 0.0017965620000381932
}
}
},
"UnityEnvironment.step": {
"total": 0.05887842999999293,
"count": 1,
"is_parallel": true,
"self": 0.0008331500000622327,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041225100000019665,
"count": 1,
"is_parallel": true,
"self": 0.00041225100000019665
},
"communicator.exchange": {
"total": 0.054533852999952614,
"count": 1,
"is_parallel": true,
"self": 0.054533852999952614
},
"steps_from_proto": {
"total": 0.003099175999977888,
"count": 1,
"is_parallel": true,
"self": 0.00046053099998744074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002638644999990447,
"count": 10,
"is_parallel": true,
"self": 0.002638644999990447
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 263.89454179300157,
"count": 18200,
"is_parallel": true,
"self": 11.10396821602643,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.691071338987001,
"count": 18200,
"is_parallel": true,
"self": 5.691071338987001
},
"communicator.exchange": {
"total": 207.68186093600343,
"count": 18200,
"is_parallel": true,
"self": 207.68186093600343
},
"steps_from_proto": {
"total": 39.4176413019847,
"count": 18200,
"is_parallel": true,
"self": 7.33252605497205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 32.08511524701265,
"count": 182000,
"is_parallel": true,
"self": 32.08511524701265
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014628199994604074,
"count": 1,
"self": 0.00014628199994604074,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 486.8113978270009,
"count": 457028,
"is_parallel": true,
"self": 10.297528911982226,
"children": {
"process_trajectory": {
"total": 269.54790514601757,
"count": 457028,
"is_parallel": true,
"self": 267.40488563901766,
"children": {
"RLTrainer._checkpoint": {
"total": 2.143019506999906,
"count": 4,
"is_parallel": true,
"self": 2.143019506999906
}
}
},
"_update_policy": {
"total": 206.96596376900112,
"count": 90,
"is_parallel": true,
"self": 77.38786005699751,
"children": {
"TorchPPOOptimizer.update": {
"total": 129.5781037120036,
"count": 4584,
"is_parallel": true,
"self": 129.5781037120036
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1438288479999983,
"count": 1,
"self": 0.0009008600000015576,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14292798799999673,
"count": 1,
"self": 0.14292798799999673
}
}
}
}
}
}
}