{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7965095043182373,
"min": 0.7965095043182373,
"max": 2.8469808101654053,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7570.0263671875,
"min": 7570.0263671875,
"max": 29061.98046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.086065292358398,
"min": 0.39212319254875183,
"max": 13.306072235107422,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2551.78271484375,
"min": 76.0718994140625,
"max": 2706.6591796875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06170986971326284,
"min": 0.06170986971326284,
"max": 0.07407346117478857,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24683947885305135,
"min": 0.24683947885305135,
"max": 0.36762447569601453,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.25929319449499544,
"min": 0.11893989464984842,
"max": 0.2841741053351,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0371727779799818,
"min": 0.47575957859939366,
"max": 1.3374426061031865,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.295454545454547,
"min": 3.8181818181818183,
"max": 26.5,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1113.0,
"min": 168.0,
"max": 1420.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.295454545454547,
"min": 3.8181818181818183,
"max": 26.5,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1113.0,
"min": 168.0,
"max": 1420.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740840952",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --force --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740841386"
},
"total": 433.888946423,
"count": 1,
"self": 0.4395538910000596,
"children": {
"run_training.setup": {
"total": 0.02323520999993889,
"count": 1,
"self": 0.02323520999993889
},
"TrainerController.start_learning": {
"total": 433.426157322,
"count": 1,
"self": 0.35213818500380967,
"children": {
"TrainerController._reset_env": {
"total": 2.8761308899998994,
"count": 1,
"self": 2.8761308899998994
},
"TrainerController.advance": {
"total": 430.11060686999633,
"count": 18192,
"self": 0.3788312680043191,
"children": {
"env_step": {
"total": 305.63899279898953,
"count": 18192,
"self": 232.88223049198245,
"children": {
"SubprocessEnvManager._take_step": {
"total": 72.54429838399449,
"count": 18192,
"self": 1.3099818999937725,
"children": {
"TorchPolicy.evaluate": {
"total": 71.23431648400071,
"count": 18192,
"self": 71.23431648400071
}
}
},
"workers": {
"total": 0.2124639230125922,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 431.8856598140053,
"count": 18192,
"is_parallel": true,
"self": 227.64169964100495,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0054354259999627175,
"count": 1,
"is_parallel": true,
"self": 0.0038120500000786706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001623375999884047,
"count": 10,
"is_parallel": true,
"self": 0.001623375999884047
}
}
},
"UnityEnvironment.step": {
"total": 0.03509261600004265,
"count": 1,
"is_parallel": true,
"self": 0.0005600430001777568,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000369755999940935,
"count": 1,
"is_parallel": true,
"self": 0.000369755999940935
},
"communicator.exchange": {
"total": 0.03227780299994265,
"count": 1,
"is_parallel": true,
"self": 0.03227780299994265
},
"steps_from_proto": {
"total": 0.001885013999981311,
"count": 1,
"is_parallel": true,
"self": 0.00036247299976821523,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015225410002130957,
"count": 10,
"is_parallel": true,
"self": 0.0015225410002130957
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 204.24396017300035,
"count": 18191,
"is_parallel": true,
"self": 9.83376089100409,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.416776858992421,
"count": 18191,
"is_parallel": true,
"self": 5.416776858992421
},
"communicator.exchange": {
"total": 156.94058146100747,
"count": 18191,
"is_parallel": true,
"self": 156.94058146100747
},
"steps_from_proto": {
"total": 32.05284096199637,
"count": 18191,
"is_parallel": true,
"self": 5.80490611699156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.247934845004806,
"count": 181910,
"is_parallel": true,
"self": 26.247934845004806
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 124.09278280300248,
"count": 18192,
"self": 0.43902123299812956,
"children": {
"process_trajectory": {
"total": 27.585990140003673,
"count": 18192,
"self": 27.090362848003792,
"children": {
"RLTrainer._checkpoint": {
"total": 0.495627291999881,
"count": 4,
"self": 0.495627291999881
}
}
},
"_update_policy": {
"total": 96.06777143000068,
"count": 90,
"self": 37.935829941997895,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.13194148800278,
"count": 4587,
"self": 58.13194148800278
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.98000018626044e-07,
"count": 1,
"self": 9.98000018626044e-07
},
"TrainerController._save_models": {
"total": 0.08728037899993524,
"count": 1,
"self": 0.0009147729998630894,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08636560600007215,
"count": 1,
"self": 0.08636560600007215
}
}
}
}
}
}
}