{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7905828356742859,
"min": 0.7905828356742859,
"max": 1.4488530158996582,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 23692.185546875,
"min": 23692.185546875,
"max": 43952.40625,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479907.0,
"min": 29952.0,
"max": 479907.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479907.0,
"min": 29952.0,
"max": 479907.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.050324004143476486,
"min": -0.14133958518505096,
"max": 0.05295198783278465,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 12.430028915405273,
"min": -33.49748229980469,
"max": 13.026188850402832,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022865276783704758,
"min": 0.019408101215958595,
"max": 0.3364011347293854,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.647723197937012,
"min": 4.774393081665039,
"max": 79.72706604003906,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06561698534601586,
"min": 0.06548031344418126,
"max": 0.0715611568007219,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9186377948442219,
"min": 0.49255309459765373,
"max": 1.0018561952101066,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0076732196910327816,
"min": 0.001003483737965608,
"max": 0.008528199376384478,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10742507567445894,
"min": 0.014048772331518513,
"max": 0.10742507567445894,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.1104878679357146e-05,
"min": 2.1104878679357146e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00029546830151100004,
"min": 0.00029546830151100004,
"max": 0.0030854690715104,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10703492857142859,
"min": 0.10703492857142859,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4984890000000002,
"min": 1.3773696000000002,
"max": 2.3386884,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007127893642857145,
"min": 0.0007127893642857145,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.009979051100000002,
"min": 0.009979051100000002,
"max": 0.10287611104000001,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.023121561855077744,
"min": 0.023121561855077744,
"max": 0.45784619450569153,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.3237018585205078,
"min": 0.3237018585205078,
"max": 3.204923391342163,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 817.9444444444445,
"min": 812.0,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29446.0,
"min": 15984.0,
"max": 32958.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.2928999567197429,
"min": -1.0000000521540642,
"max": 0.2928999567197429,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 10.544398441910744,
"min": -29.500201620161533,
"max": 10.544398441910744,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.2928999567197429,
"min": -1.0000000521540642,
"max": 0.2928999567197429,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 10.544398441910744,
"min": -29.500201620161533,
"max": 10.544398441910744,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.19783258096625408,
"min": 0.19783258096625408,
"max": 9.425822841003537,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 7.121972914785147,
"min": 7.121972914785147,
"max": 150.8131654560566,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657271364",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1657272399"
},
"total": 1035.552846647,
"count": 1,
"self": 0.4792353269999694,
"children": {
"run_training.setup": {
"total": 0.04348848199998656,
"count": 1,
"self": 0.04348848199998656
},
"TrainerController.start_learning": {
"total": 1035.0301228380001,
"count": 1,
"self": 0.7443176909882823,
"children": {
"TrainerController._reset_env": {
"total": 10.186253775000012,
"count": 1,
"self": 10.186253775000012
},
"TrainerController.advance": {
"total": 1023.9953682310115,
"count": 31601,
"self": 0.7968940850220179,
"children": {
"env_step": {
"total": 654.7717664469998,
"count": 31601,
"self": 598.3025052109763,
"children": {
"SubprocessEnvManager._take_step": {
"total": 56.07712069400617,
"count": 31601,
"self": 2.4537310670049237,
"children": {
"TorchPolicy.evaluate": {
"total": 53.623389627001245,
"count": 31321,
"self": 18.190058054022188,
"children": {
"TorchPolicy.sample_actions": {
"total": 35.43333157297906,
"count": 31321,
"self": 35.43333157297906
}
}
}
}
},
"workers": {
"total": 0.39214054201733006,
"count": 31601,
"self": 0.0,
"children": {
"worker_root": {
"total": 1032.863793485993,
"count": 31601,
"is_parallel": true,
"self": 488.29543695196526,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007559816999958002,
"count": 1,
"is_parallel": true,
"self": 0.0039324250000163374,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003627391999941665,
"count": 8,
"is_parallel": true,
"self": 0.003627391999941665
}
}
},
"UnityEnvironment.step": {
"total": 0.04908670200001097,
"count": 1,
"is_parallel": true,
"self": 0.0004989720000025955,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044811300000446863,
"count": 1,
"is_parallel": true,
"self": 0.00044811300000446863
},
"communicator.exchange": {
"total": 0.04644686700004286,
"count": 1,
"is_parallel": true,
"self": 0.04644686700004286
},
"steps_from_proto": {
"total": 0.0016927499999610518,
"count": 1,
"is_parallel": true,
"self": 0.00041911699992169815,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012736330000393536,
"count": 8,
"is_parallel": true,
"self": 0.0012736330000393536
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 544.5683565340278,
"count": 31600,
"is_parallel": true,
"self": 14.495199145032075,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.217369935007241,
"count": 31600,
"is_parallel": true,
"self": 12.217369935007241
},
"communicator.exchange": {
"total": 469.1367598579925,
"count": 31600,
"is_parallel": true,
"self": 469.1367598579925
},
"steps_from_proto": {
"total": 48.7190275959959,
"count": 31600,
"is_parallel": true,
"self": 12.138001012961183,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.58102658303471,
"count": 252800,
"is_parallel": true,
"self": 36.58102658303471
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 368.42670769898973,
"count": 31601,
"self": 1.318300748012689,
"children": {
"process_trajectory": {
"total": 82.1992864399785,
"count": 31601,
"self": 82.0843854469785,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11490099299999201,
"count": 1,
"self": 0.11490099299999201
}
}
},
"_update_policy": {
"total": 284.90912051099855,
"count": 219,
"self": 112.24597976199823,
"children": {
"TorchPPOOptimizer.update": {
"total": 172.66314074900032,
"count": 11439,
"self": 172.66314074900032
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2500001957960194e-06,
"count": 1,
"self": 1.2500001957960194e-06
},
"TrainerController._save_models": {
"total": 0.10418189100005293,
"count": 1,
"self": 0.0017239779999727034,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10245791300008023,
"count": 1,
"self": 0.10245791300008023
}
}
}
}
}
}
}