{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9233230352401733,
"min": 0.9183968901634216,
"max": 2.874936819076538,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8826.044921875,
"min": 8826.044921875,
"max": 29473.8515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.75759506225586,
"min": 0.43595898151397705,
"max": 12.75759506225586,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2487.73095703125,
"min": 84.57604217529297,
"max": 2563.640625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06840837000174146,
"min": 0.06331326498070221,
"max": 0.07538211859783212,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27363348000696586,
"min": 0.25325305992280883,
"max": 0.3734950827019216,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21155869821999584,
"min": 0.11055265141390812,
"max": 0.2554019814466729,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8462347928799834,
"min": 0.44221060565563247,
"max": 1.2682036234467637,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.295454545454547,
"min": 2.977272727272727,
"max": 25.295454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1113.0,
"min": 131.0,
"max": 1383.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.295454545454547,
"min": 2.977272727272727,
"max": 25.295454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1113.0,
"min": 131.0,
"max": 1383.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1726719330",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/luyang/miniconda3/envs/rl/bin/mlagents-learn ./config/ppo/SnowballTarget.yml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget.x86_64 --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.0.0",
"mlagents_envs_version": "1.0.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.21.2",
"end_time_seconds": "1726719610"
},
"total": 279.7842319989577,
"count": 1,
"self": 0.2693655928596854,
"children": {
"run_training.setup": {
"total": 0.023389742942526937,
"count": 1,
"self": 0.023389742942526937
},
"TrainerController.start_learning": {
"total": 279.4914766631555,
"count": 1,
"self": 0.3016328443773091,
"children": {
"TrainerController._reset_env": {
"total": 1.0745465229265392,
"count": 1,
"self": 1.0745465229265392
},
"TrainerController.advance": {
"total": 278.0625854067039,
"count": 18202,
"self": 0.14726616628468037,
"children": {
"env_step": {
"total": 277.9153192404192,
"count": 18202,
"self": 195.51846464676782,
"children": {
"SubprocessEnvManager._take_step": {
"total": 82.24586562998593,
"count": 18202,
"self": 0.8457684505265206,
"children": {
"TorchPolicy.evaluate": {
"total": 81.40009717945941,
"count": 18202,
"self": 81.40009717945941
}
}
},
"workers": {
"total": 0.15098896366544068,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 279.05951491976157,
"count": 18202,
"is_parallel": true,
"self": 122.91395532758906,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0014038460794836283,
"count": 1,
"is_parallel": true,
"self": 0.00040356279350817204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010002832859754562,
"count": 10,
"is_parallel": true,
"self": 0.0010002832859754562
}
}
},
"UnityEnvironment.step": {
"total": 0.02158012497238815,
"count": 1,
"is_parallel": true,
"self": 0.00028138491325080395,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002248419914394617,
"count": 1,
"is_parallel": true,
"self": 0.0002248419914394617
},
"communicator.exchange": {
"total": 0.02010426507331431,
"count": 1,
"is_parallel": true,
"self": 0.02010426507331431
},
"steps_from_proto": {
"total": 0.0009696329943835735,
"count": 1,
"is_parallel": true,
"self": 0.0002049943432211876,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007646386511623859,
"count": 10,
"is_parallel": true,
"self": 0.0007646386511623859
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 156.1455595921725,
"count": 18201,
"is_parallel": true,
"self": 5.008005099371076,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.6367693501524627,
"count": 18201,
"is_parallel": true,
"self": 2.6367693501524627
},
"communicator.exchange": {
"total": 131.84950335300528,
"count": 18201,
"is_parallel": true,
"self": 131.84950335300528
},
"steps_from_proto": {
"total": 16.65128178964369,
"count": 18201,
"is_parallel": true,
"self": 3.1236582833807915,
"children": {
"_process_rank_one_or_two_observation": {
"total": 13.527623506262898,
"count": 182010,
"is_parallel": true,
"self": 13.527623506262898
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001264270395040512,
"count": 1,
"self": 0.0001264270395040512,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 276.20801323419437,
"count": 477153,
"is_parallel": true,
"self": 5.204548853216693,
"children": {
"process_trajectory": {
"total": 146.2149193333462,
"count": 477153,
"is_parallel": true,
"self": 145.87083225068636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3440870826598257,
"count": 4,
"is_parallel": true,
"self": 0.3440870826598257
}
}
},
"_update_policy": {
"total": 124.78854504763149,
"count": 90,
"is_parallel": true,
"self": 31.466559036402032,
"children": {
"TorchPPOOptimizer.update": {
"total": 93.32198601122946,
"count": 4587,
"is_parallel": true,
"self": 93.32198601122946
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.05258546210825443,
"count": 1,
"self": 0.0007277692202478647,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05185769288800657,
"count": 1,
"self": 0.05185769288800657
}
}
}
}
}
}
}