{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5220120549201965,
"min": 0.5126771330833435,
"max": 1.492493748664856,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15601.896484375,
"min": 15298.28515625,
"max": 45276.2890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989952.0,
"min": 29952.0,
"max": 989952.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989952.0,
"min": 29952.0,
"max": 989952.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.24719680845737457,
"min": -0.11391686648130417,
"max": 0.3106386065483093,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 64.02397155761719,
"min": -27.34004783630371,
"max": 80.76603698730469,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.005267125554382801,
"min": 0.005267125554382801,
"max": 0.2093479484319687,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.3641855716705322,
"min": 1.3641855716705322,
"max": 50.243507385253906,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06995379738599332,
"min": 0.06622164899978007,
"max": 0.07444704377206353,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9793531634039064,
"min": 0.4849271037670698,
"max": 1.0527333028228751,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00998160583607774,
"min": 0.00011332842470164211,
"max": 0.011717497330230092,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.13974248170508835,
"min": 0.0014732695211213474,
"max": 0.17576245995345138,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.354426119985711e-06,
"min": 7.354426119985711e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010296196567979995,
"min": 0.00010296196567979995,
"max": 0.0033768187743938006,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245144285714285,
"min": 0.10245144285714285,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343202,
"min": 1.3691136000000002,
"max": 2.4256062,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002548991414285714,
"min": 0.0002548991414285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035685879799999994,
"min": 0.0035685879799999994,
"max": 0.11257805938,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0109518738463521,
"min": 0.01087829191237688,
"max": 0.3774993121623993,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15332622826099396,
"min": 0.15229608118534088,
"max": 2.6424951553344727,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 551.1153846153846,
"min": 545.074074074074,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28658.0,
"min": 15984.0,
"max": 32730.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0523293906567144,
"min": -1.0000000521540642,
"max": 1.0523293906567144,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 53.66879892349243,
"min": -32.000001668930054,
"max": 56.559998609125614,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0523293906567144,
"min": -1.0000000521540642,
"max": 1.0523293906567144,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 53.66879892349243,
"min": -32.000001668930054,
"max": 56.559998609125614,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06370741601430756,
"min": 0.061470585636521316,
"max": 7.256573970429599,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2490782167296857,
"min": 3.2490782167296857,
"max": 116.10518352687359,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703175841",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703178021"
},
"total": 2180.8495752979998,
"count": 1,
"self": 0.7407979329996124,
"children": {
"run_training.setup": {
"total": 0.04924006199996711,
"count": 1,
"self": 0.04924006199996711
},
"TrainerController.start_learning": {
"total": 2180.059537303,
"count": 1,
"self": 1.5055777190109438,
"children": {
"TrainerController._reset_env": {
"total": 3.084644322000031,
"count": 1,
"self": 3.084644322000031
},
"TrainerController.advance": {
"total": 2175.341901868989,
"count": 63365,
"self": 1.5696453689743066,
"children": {
"env_step": {
"total": 1523.3421550749933,
"count": 63365,
"self": 1382.6798618390033,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.71468713098432,
"count": 63365,
"self": 5.075888066012112,
"children": {
"TorchPolicy.evaluate": {
"total": 134.6387990649722,
"count": 62572,
"self": 134.6387990649722
}
}
},
"workers": {
"total": 0.9476061050057183,
"count": 63365,
"self": 0.0,
"children": {
"worker_root": {
"total": 2174.343248028003,
"count": 63365,
"is_parallel": true,
"self": 922.467180193049,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003898826000011013,
"count": 1,
"is_parallel": true,
"self": 0.0026457970000706155,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012530289999403976,
"count": 8,
"is_parallel": true,
"self": 0.0012530289999403976
}
}
},
"UnityEnvironment.step": {
"total": 0.05332855200003905,
"count": 1,
"is_parallel": true,
"self": 0.0006084620000592622,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005378409999821088,
"count": 1,
"is_parallel": true,
"self": 0.0005378409999821088
},
"communicator.exchange": {
"total": 0.05051028899998755,
"count": 1,
"is_parallel": true,
"self": 0.05051028899998755
},
"steps_from_proto": {
"total": 0.001671960000010131,
"count": 1,
"is_parallel": true,
"self": 0.00034310700004880346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013288529999613274,
"count": 8,
"is_parallel": true,
"self": 0.0013288529999613274
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1251.876067834954,
"count": 63364,
"is_parallel": true,
"self": 37.10952919692181,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.328130384995575,
"count": 63364,
"is_parallel": true,
"self": 26.328130384995575
},
"communicator.exchange": {
"total": 1081.4985036990286,
"count": 63364,
"is_parallel": true,
"self": 1081.4985036990286
},
"steps_from_proto": {
"total": 106.93990455400808,
"count": 63364,
"is_parallel": true,
"self": 21.768869086986854,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.17103546702123,
"count": 506912,
"is_parallel": true,
"self": 85.17103546702123
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 650.4301014250215,
"count": 63365,
"self": 2.885306323972486,
"children": {
"process_trajectory": {
"total": 132.02807670504745,
"count": 63365,
"self": 131.79353632904764,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23454037599981348,
"count": 2,
"self": 0.23454037599981348
}
}
},
"_update_policy": {
"total": 515.5167183960016,
"count": 444,
"self": 310.4931097229745,
"children": {
"TorchPPOOptimizer.update": {
"total": 205.02360867302707,
"count": 22782,
"self": 205.02360867302707
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3379999472817872e-06,
"count": 1,
"self": 1.3379999472817872e-06
},
"TrainerController._save_models": {
"total": 0.12741205500014985,
"count": 1,
"self": 0.0020019019998471776,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12541015300030267,
"count": 1,
"self": 0.12541015300030267
}
}
}
}
}
}
}