{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.31118911504745483,
"min": 0.30987805128097534,
"max": 1.4798107147216797,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9405.3798828125,
"min": 9261.634765625,
"max": 44891.5390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989971.0,
"min": 29952.0,
"max": 989971.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989971.0,
"min": 29952.0,
"max": 989971.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6827241778373718,
"min": -0.18512026965618134,
"max": 0.7158223390579224,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 193.2109375,
"min": -43.873504638671875,
"max": 208.30430603027344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.019785964861512184,
"min": 0.0022637764923274517,
"max": 0.3247827887535095,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.599428176879883,
"min": 0.574999213218689,
"max": 76.97351837158203,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06358976356638041,
"min": 0.06358976356638041,
"max": 0.07413574998715834,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8902566899293257,
"min": 0.4627963032750092,
"max": 1.0391918089607153,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014123367347998456,
"min": 0.0002900175242562183,
"max": 0.01801295071310874,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19772714287197837,
"min": 0.0023201401940497464,
"max": 0.25218130998352234,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.578654616671431e-06,
"min": 7.578654616671431e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010610116463340003,
"min": 0.00010610116463340003,
"max": 0.0033834743721753,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252618571428572,
"min": 0.10252618571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353666,
"min": 1.327104,
"max": 2.5278247,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026236595285714293,
"min": 0.00026236595285714293,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003673123340000001,
"min": 0.003673123340000001,
"max": 0.11280968753000004,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010815544985234737,
"min": 0.010815544985234737,
"max": 0.44237253069877625,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15141762793064117,
"min": 0.15141762793064117,
"max": 3.0966076850891113,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 278.13,
"min": 248.27272727272728,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27813.0,
"min": 15984.0,
"max": 31968.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6818519852310418,
"min": -1.0000000521540642,
"max": 1.7258636211807077,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 168.1851985231042,
"min": -32.000001668930054,
"max": 207.9571982100606,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6818519852310418,
"min": -1.0000000521540642,
"max": 1.7258636211807077,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 168.1851985231042,
"min": -32.000001668930054,
"max": 207.9571982100606,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.030918405601732958,
"min": 0.028007515557235383,
"max": 9.338343483395875,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0918405601732957,
"min": 3.0918405601732957,
"max": 149.413495734334,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1735155516",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1735157111"
},
"total": 1594.686857048,
"count": 1,
"self": 0.32290964999992866,
"children": {
"run_training.setup": {
"total": 0.05529550699975516,
"count": 1,
"self": 0.05529550699975516
},
"TrainerController.start_learning": {
"total": 1594.3086518910004,
"count": 1,
"self": 1.0989483202201882,
"children": {
"TrainerController._reset_env": {
"total": 4.828269435000038,
"count": 1,
"self": 4.828269435000038
},
"TrainerController.advance": {
"total": 1588.2962982707804,
"count": 64305,
"self": 1.128837062790808,
"children": {
"env_step": {
"total": 1008.9481471770187,
"count": 64305,
"self": 887.9773703100982,
"children": {
"SubprocessEnvManager._take_step": {
"total": 120.30348505102847,
"count": 64305,
"self": 3.933692541923847,
"children": {
"TorchPolicy.evaluate": {
"total": 116.36979250910463,
"count": 62570,
"self": 116.36979250910463
}
}
},
"workers": {
"total": 0.6672918158919856,
"count": 64305,
"self": 0.0,
"children": {
"worker_root": {
"total": 1592.5287438390314,
"count": 64305,
"is_parallel": true,
"self": 787.4921015800564,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002048862000265217,
"count": 1,
"is_parallel": true,
"self": 0.0006436590010707732,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014052029991944437,
"count": 8,
"is_parallel": true,
"self": 0.0014052029991944437
}
}
},
"UnityEnvironment.step": {
"total": 0.035248400999989826,
"count": 1,
"is_parallel": true,
"self": 0.0004088339992449619,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002974990002257982,
"count": 1,
"is_parallel": true,
"self": 0.0002974990002257982
},
"communicator.exchange": {
"total": 0.03357177900034003,
"count": 1,
"is_parallel": true,
"self": 0.03357177900034003
},
"steps_from_proto": {
"total": 0.0009702890001790365,
"count": 1,
"is_parallel": true,
"self": 0.00022150299901113613,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007487860011679004,
"count": 8,
"is_parallel": true,
"self": 0.0007487860011679004
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 805.036642258975,
"count": 64304,
"is_parallel": true,
"self": 20.112847677958598,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.30974388001141,
"count": 64304,
"is_parallel": true,
"self": 13.30974388001141
},
"communicator.exchange": {
"total": 714.3880361340607,
"count": 64304,
"is_parallel": true,
"self": 714.3880361340607
},
"steps_from_proto": {
"total": 57.226014566944286,
"count": 64304,
"is_parallel": true,
"self": 12.015926416107504,
"children": {
"_process_rank_one_or_two_observation": {
"total": 45.21008815083678,
"count": 514432,
"is_parallel": true,
"self": 45.21008815083678
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 578.219314030971,
"count": 64305,
"self": 2.0897318569031995,
"children": {
"process_trajectory": {
"total": 111.83856276807819,
"count": 64305,
"self": 111.60626052707721,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23230224100097985,
"count": 2,
"self": 0.23230224100097985
}
}
},
"_update_policy": {
"total": 464.2910194059896,
"count": 438,
"self": 255.22920347094168,
"children": {
"TorchPPOOptimizer.update": {
"total": 209.0618159350479,
"count": 22821,
"self": 209.0618159350479
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.57999873207882e-07,
"count": 1,
"self": 9.57999873207882e-07
},
"TrainerController._save_models": {
"total": 0.08513490699988324,
"count": 1,
"self": 0.0014799549999224837,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08365495199996076,
"count": 1,
"self": 0.08365495199996076
}
}
}
}
}
}
}