{ "name": "root", "gauges": { "SnowballTarget.Policy.Entropy.mean": { "value": 0.9306353330612183, "min": 0.9174208045005798, "max": 2.886843681335449, "count": 19 }, "SnowballTarget.Policy.Entropy.sum": { "value": 8926.654296875, "min": 8814.162109375, "max": 28587.62890625, "count": 19 }, "SnowballTarget.Step.mean": { "value": 199992.0, "min": 19952.0, "max": 199992.0, "count": 19 }, "SnowballTarget.Step.sum": { "value": 199992.0, "min": 19952.0, "max": 199992.0, "count": 19 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": { "value": 12.929614067077637, "min": 0.09861564636230469, "max": 12.929614067077637, "count": 19 }, "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": { "value": 2585.9228515625, "min": 9.171255111694336, "max": 2629.3388671875, "count": 19 }, "SnowballTarget.Losses.PolicyLoss.mean": { "value": 0.06271438484956675, "min": 0.05937604545015704, "max": 0.07687418960735622, "count": 19 }, "SnowballTarget.Losses.PolicyLoss.sum": { "value": 0.31357192424783376, "min": 0.11875209090031408, "max": 0.38437094803678107, "count": 19 }, "SnowballTarget.Losses.ValueLoss.mean": { "value": 0.2128220895049619, "min": 0.11455785045308955, "max": 0.2771448446985553, "count": 19 }, "SnowballTarget.Losses.ValueLoss.sum": { "value": 1.0641104475248095, "min": 0.2291157009061791, "max": 1.3743064172712027, "count": 19 }, "SnowballTarget.Policy.LearningRate.mean": { "value": 6.68409777199999e-06, "min": 6.68409777199999e-06, "max": 0.00027233400922199995, "count": 19 }, "SnowballTarget.Policy.LearningRate.sum": { "value": 3.342048885999995e-05, "min": 3.342048885999995e-05, "max": 0.00123792008736, "count": 19 }, "SnowballTarget.Policy.Epsilon.mean": { "value": 0.10222800000000001, "min": 0.10222800000000001, "max": 0.19077799999999998, "count": 19 }, "SnowballTarget.Policy.Epsilon.sum": { "value": 0.51114, "min": 0.38155599999999995, "max": 0.91264, "count": 19 }, "SnowballTarget.Policy.Beta.mean": { "value": 0.00012117719999999986, "min": 0.00012117719999999986, "max": 0.0045398222, "count": 19 }, "SnowballTarget.Policy.Beta.sum": { "value": 0.0006058859999999993, "min": 0.0006058859999999993, "max": 0.020640736, "count": 19 }, "SnowballTarget.Environment.EpisodeLength.mean": { "value": 199.0, "min": 199.0, "max": 199.0, "count": 19 }, "SnowballTarget.Environment.EpisodeLength.sum": { "value": 10945.0, "min": 4378.0, "max": 10945.0, "count": 19 }, "SnowballTarget.Environment.CumulativeReward.mean": { "value": 25.3, "min": 3.0454545454545454, "max": 25.472727272727273, "count": 19 }, "SnowballTarget.Environment.CumulativeReward.sum": { "value": 1265.0, "min": 67.0, "max": 1401.0, "count": 19 }, "SnowballTarget.Policy.ExtrinsicReward.mean": { "value": 25.3, "min": 3.0454545454545454, "max": 25.472727272727273, "count": 19 }, "SnowballTarget.Policy.ExtrinsicReward.sum": { "value": 1265.0, "min": 67.0, "max": 1401.0, "count": 19 }, "SnowballTarget.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 19 }, "SnowballTarget.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 19 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1725276968", "python_version": "3.10.12 (main, Sep 2 2024, 16:47:36) [GCC 9.4.0]", "command_line_arguments": "/home/coder/.pyenv/versions/3.10.12/envs/deep-rl-unit5-3.10.12/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": 
"1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.4.0+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1725277223" }, "total": 254.95736005704384, "count": 1, "self": 0.2718341660220176, "children": { "run_training.setup": { "total": 0.014255571004468948, "count": 1, "self": 0.014255571004468948 }, "TrainerController.start_learning": { "total": 254.67127032001736, "count": 1, "self": 0.27585696789901704, "children": { "TrainerController._reset_env": { "total": 2.0844070439925417, "count": 1, "self": 2.0844070439925417 }, "TrainerController.advance": { "total": 252.244125985133, "count": 16800, "self": 0.2453883015550673, "children": { "env_step": { "total": 183.97890468756668, "count": 16800, "self": 146.9964452990098, "children": { "SubprocessEnvManager._take_step": { "total": 36.81382933520945, "count": 16800, "self": 0.9520696810213849, "children": { "TorchPolicy.evaluate": { "total": 35.86175965418806, "count": 16800, "self": 35.86175965418806 } } }, "workers": { "total": 0.1686300533474423, "count": 16800, "self": 0.0, "children": { "worker_root": { "total": 254.14779354829807, "count": 16800, "is_parallel": true, "self": 123.84739528095815, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0013005439541302621, "count": 1, "is_parallel": true, "self": 0.0004087469424121082, "children": { "_process_rank_one_or_two_observation": { "total": 0.000891797011718154, "count": 10, "is_parallel": true, "self": 0.000891797011718154 } } }, "UnityEnvironment.step": { "total": 0.02177219500299543, "count": 1, "is_parallel": true, "self": 0.00023311696713790298, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00021367997396737337, "count": 1, "is_parallel": true, "self": 0.00021367997396737337 }, "communicator.exchange": { "total": 0.020536153053399175, "count": 1, "is_parallel": true, "self": 0.020536153053399175 }, "steps_from_proto": { "total": 0.0007892450084909797, "count": 1, "is_parallel": true, "self": 0.00017109396867454052, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006181510398164392, "count": 10, "is_parallel": true, "self": 0.0006181510398164392 } } } } } } }, "UnityEnvironment.step": { "total": 130.30039826733992, "count": 16799, "is_parallel": true, "self": 3.8429405740462244, "children": { "UnityEnvironment._generate_step_input": { "total": 2.014002131181769, "count": 16799, "is_parallel": true, "self": 2.014002131181769 }, "communicator.exchange": { "total": 112.2852336531505, "count": 16799, "is_parallel": true, "self": 112.2852336531505 }, "steps_from_proto": { "total": 12.158221908961423, "count": 16799, "is_parallel": true, "self": 2.3299664403893985, "children": { "_process_rank_one_or_two_observation": { "total": 9.828255468572024, "count": 167990, "is_parallel": true, "self": 9.828255468572024 } } } } } } } } } } }, "trainer_advance": { "total": 68.01983299601125, "count": 16800, "self": 0.3528277283767238, "children": { "process_trajectory": { "total": 15.628739993670024, "count": 16800, "self": 15.353698065679055, "children": { "RLTrainer._checkpoint": { "total": 0.27504192799096927, "count": 4, "self": 0.27504192799096927 } } }, "_update_policy": { "total": 52.0382652739645, "count": 84, "self": 26.969872011686675, "children": { "TorchPPOOptimizer.update": { "total": 25.068393262277823, "count": 4281, "self": 25.068393262277823 } } } } } } }, "trainer_threads": { "total": 8.849892765283585e-07, 
"count": 1, "self": 8.849892765283585e-07 }, "TrainerController._save_models": { "total": 0.06687943800352514, "count": 1, "self": 0.002466033969540149, "children": { "RLTrainer._checkpoint": { "total": 0.06441340403398499, "count": 1, "self": 0.06441340403398499 } } } } } } }