{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8915648460388184,
"min": 0.8915648460388184,
"max": 2.868549108505249,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8512.6611328125,
"min": 8512.6611328125,
"max": 29376.8125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.073209762573242,
"min": 0.3932214379310608,
"max": 13.073209762573242,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2549.27587890625,
"min": 76.28495788574219,
"max": 2645.06982421875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07079323925084763,
"min": 0.06667173291469797,
"max": 0.07393324183809169,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2831729570033905,
"min": 0.26668693165879187,
"max": 0.35957362339261223,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18359293861716403,
"min": 0.12699982019680023,
"max": 0.281303578293791,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7343717544686561,
"min": 0.5079992807872009,
"max": 1.3989846054829804,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.931818181818183,
"min": 3.477272727272727,
"max": 25.931818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1141.0,
"min": 153.0,
"max": 1424.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.931818181818183,
"min": 3.477272727272727,
"max": 25.931818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1141.0,
"min": 153.0,
"max": 1424.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675181690",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675182109"
},
"total": 418.67050747199994,
"count": 1,
"self": 0.3900846779998801,
"children": {
"run_training.setup": {
"total": 0.11378283499993813,
"count": 1,
"self": 0.11378283499993813
},
"TrainerController.start_learning": {
"total": 418.1666399590001,
"count": 1,
"self": 0.5258126670007641,
"children": {
"TrainerController._reset_env": {
"total": 9.21334479199993,
"count": 1,
"self": 9.21334479199993
},
"TrainerController.advance": {
"total": 408.31165400399925,
"count": 18202,
"self": 0.2545810269938329,
"children": {
"env_step": {
"total": 408.0570729770054,
"count": 18202,
"self": 268.02353787202514,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.7733319329909,
"count": 18202,
"self": 1.3351745869808838,
"children": {
"TorchPolicy.evaluate": {
"total": 138.43815734601003,
"count": 18202,
"self": 29.950856596006588,
"children": {
"TorchPolicy.sample_actions": {
"total": 108.48730075000344,
"count": 18202,
"self": 108.48730075000344
}
}
}
}
},
"workers": {
"total": 0.26020317198936027,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 416.99780855201107,
"count": 18202,
"is_parallel": true,
"self": 201.14182913300738,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.009359103999940999,
"count": 1,
"is_parallel": true,
"self": 0.00422565299982125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005133451000119749,
"count": 10,
"is_parallel": true,
"self": 0.005133451000119749
}
}
},
"UnityEnvironment.step": {
"total": 0.02915844799997558,
"count": 1,
"is_parallel": true,
"self": 0.00034214499987683666,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034356900005150237,
"count": 1,
"is_parallel": true,
"self": 0.00034356900005150237
},
"communicator.exchange": {
"total": 0.02703253599997879,
"count": 1,
"is_parallel": true,
"self": 0.02703253599997879
},
"steps_from_proto": {
"total": 0.0014401980000684489,
"count": 1,
"is_parallel": true,
"self": 0.0003672899997582135,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010729080003102354,
"count": 10,
"is_parallel": true,
"self": 0.0010729080003102354
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 215.8559794190037,
"count": 18201,
"is_parallel": true,
"self": 8.203937898039726,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.798919756993428,
"count": 18201,
"is_parallel": true,
"self": 4.798919756993428
},
"communicator.exchange": {
"total": 173.9338994869936,
"count": 18201,
"is_parallel": true,
"self": 173.9338994869936
},
"steps_from_proto": {
"total": 28.91922227697694,
"count": 18201,
"is_parallel": true,
"self": 6.150497173990175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.768725102986764,
"count": 182010,
"is_parallel": true,
"self": 22.768725102986764
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.092800008947961e-05,
"count": 1,
"self": 4.092800008947961e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 405.5467648249819,
"count": 335634,
"is_parallel": true,
"self": 8.530060390928611,
"children": {
"process_trajectory": {
"total": 232.74822150405248,
"count": 335634,
"is_parallel": true,
"self": 232.07068571905245,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6775357850000319,
"count": 4,
"is_parallel": true,
"self": 0.6775357850000319
}
}
},
"_update_policy": {
"total": 164.2684829300008,
"count": 90,
"is_parallel": true,
"self": 41.13552991700237,
"children": {
"TorchPPOOptimizer.update": {
"total": 123.13295301299843,
"count": 4587,
"is_parallel": true,
"self": 123.13295301299843
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11578756800008705,
"count": 1,
"self": 0.000829520000024786,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11495804800006226,
"count": 1,
"self": 0.11495804800006226
}
}
}
}
}
}
}