{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1101230382919312,
"min": 1.1101230382919312,
"max": 2.8806204795837402,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10648.2998046875,
"min": 10648.2998046875,
"max": 31686.82421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.994396209716797,
"min": 0.1882990151643753,
"max": 11.994396209716797,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2338.9072265625,
"min": 36.53001022338867,
"max": 2405.818359375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.227272727272727,
"min": 3.3181818181818183,
"max": 24.345454545454544,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1066.0,
"min": 146.0,
"max": 1339.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.227272727272727,
"min": 3.3181818181818183,
"max": 24.345454545454544,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1066.0,
"min": 146.0,
"max": 1339.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.050575832431004906,
"min": 0.04357562946159647,
"max": 0.05363038236730298,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.1517274972930147,
"min": 0.09223980586125086,
"max": 0.16089114710190894,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20023308986290758,
"min": 0.1301164074617828,
"max": 0.29156228821902047,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6006992695887228,
"min": 0.3903492223853484,
"max": 0.8746868646570614,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.348097550666666e-06,
"min": 7.348097550666666e-06,
"max": 0.00029034800321733336,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.2044292652e-05,
"min": 2.2044292652e-05,
"max": 0.0008710440096520001,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10244933333333334,
"min": 0.10244933333333334,
"max": 0.1967826666666667,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.307348,
"min": 0.307348,
"max": 0.5903480000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001322217333333333,
"min": 0.0001322217333333333,
"max": 0.004839455066666666,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003966651999999999,
"min": 0.0003966651999999999,
"max": 0.0145183652,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692626772",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692627265"
},
"total": 492.5137000500001,
"count": 1,
"self": 0.4344578120001188,
"children": {
"run_training.setup": {
"total": 0.04907917900004577,
"count": 1,
"self": 0.04907917900004577
},
"TrainerController.start_learning": {
"total": 492.03016305899996,
"count": 1,
"self": 0.5566386019906986,
"children": {
"TrainerController._reset_env": {
"total": 4.190975722000076,
"count": 1,
"self": 4.190975722000076
},
"TrainerController.advance": {
"total": 487.14454597300914,
"count": 18206,
"self": 0.2616028799977812,
"children": {
"env_step": {
"total": 486.88294309301136,
"count": 18206,
"self": 352.02959792604497,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.58112317298833,
"count": 18206,
"self": 1.6955244179866895,
"children": {
"TorchPolicy.evaluate": {
"total": 132.88559875500164,
"count": 18206,
"self": 132.88559875500164
}
}
},
"workers": {
"total": 0.27222199397806435,
"count": 18206,
"self": 0.0,
"children": {
"worker_root": {
"total": 490.51185866499964,
"count": 18206,
"is_parallel": true,
"self": 246.17297120100784,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005777867999995578,
"count": 1,
"is_parallel": true,
"self": 0.004306941999857372,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014709260001382063,
"count": 10,
"is_parallel": true,
"self": 0.0014709260001382063
}
}
},
"UnityEnvironment.step": {
"total": 0.034516521999876204,
"count": 1,
"is_parallel": true,
"self": 0.000570753000147306,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00035641899989968806,
"count": 1,
"is_parallel": true,
"self": 0.00035641899989968806
},
"communicator.exchange": {
"total": 0.031487217999938366,
"count": 1,
"is_parallel": true,
"self": 0.031487217999938366
},
"steps_from_proto": {
"total": 0.002102131999890844,
"count": 1,
"is_parallel": true,
"self": 0.0005040689998168091,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015980630000740348,
"count": 10,
"is_parallel": true,
"self": 0.0015980630000740348
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 244.3388874639918,
"count": 18205,
"is_parallel": true,
"self": 10.20632819593311,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.218283486015935,
"count": 18205,
"is_parallel": true,
"self": 5.218283486015935
},
"communicator.exchange": {
"total": 193.19381641802306,
"count": 18205,
"is_parallel": true,
"self": 193.19381641802306
},
"steps_from_proto": {
"total": 35.72045936401969,
"count": 18205,
"is_parallel": true,
"self": 6.5304092980306905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.190050065989,
"count": 182050,
"is_parallel": true,
"self": 29.190050065989
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015249800003402925,
"count": 1,
"self": 0.00015249800003402925,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 483.19628459293676,
"count": 478851,
"is_parallel": true,
"self": 10.439729617871762,
"children": {
"process_trajectory": {
"total": 267.1323726490655,
"count": 478851,
"is_parallel": true,
"self": 266.37293732206535,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7594353270001193,
"count": 4,
"is_parallel": true,
"self": 0.7594353270001193
}
}
},
"_update_policy": {
"total": 205.62418232599953,
"count": 58,
"is_parallel": true,
"self": 102.88833159701016,
"children": {
"TorchPPOOptimizer.update": {
"total": 102.73585072898936,
"count": 3775,
"is_parallel": true,
"self": 102.73585072898936
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13785026400000788,
"count": 1,
"self": 0.0008885879999525059,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13696167600005538,
"count": 1,
"self": 0.13696167600005538
}
}
}
}
}
}
}