ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.40888598561286926,
"min": 0.40888598561286926,
"max": 1.4832767248153687,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12076.8564453125,
"min": 12076.8564453125,
"max": 44996.68359375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989875.0,
"min": 29952.0,
"max": 989875.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989875.0,
"min": 29952.0,
"max": 989875.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6013543605804443,
"min": -0.06041796877980232,
"max": 0.62327641248703,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 169.58192443847656,
"min": -14.68156623840332,
"max": 174.51739501953125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.019434195011854172,
"min": 0.00868353433907032,
"max": 0.347851037979126,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.480443000793457,
"min": 2.3358707427978516,
"max": 84.1799545288086,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06965599983509276,
"min": 0.06538904178854996,
"max": 0.07287731882076644,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9751839976912985,
"min": 0.4912734628320824,
"max": 1.0389336003766705,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017883034190855962,
"min": 0.0016251535215964744,
"max": 0.017883034190855962,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.25036247867198347,
"min": 0.01787668873756122,
"max": 0.25036247867198347,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.330533270807149e-06,
"min": 7.330533270807149e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010262746579130008,
"min": 0.00010262746579130008,
"max": 0.0036336154887949,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244347857142856,
"min": 0.10244347857142856,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342087,
"min": 1.3886848,
"max": 2.6177692000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002541035092857145,
"min": 0.0002541035092857145,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003557449130000003,
"min": 0.003557449130000003,
"max": 0.12113938949000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01316326204687357,
"min": 0.01308561209589243,
"max": 0.599920928478241,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18428567051887512,
"min": 0.18428567051887512,
"max": 4.199446678161621,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 305.57608695652175,
"min": 290.48979591836735,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28113.0,
"min": 15984.0,
"max": 33168.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6726782427209874,
"min": -1.0000000521540642,
"max": 1.6910040252136462,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 153.88639833033085,
"min": -27.183401711285114,
"max": 167.40939849615097,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6726782427209874,
"min": -1.0000000521540642,
"max": 1.6910040252136462,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 153.88639833033085,
"min": -27.183401711285114,
"max": 167.40939849615097,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04177115554383541,
"min": 0.04093741817647389,
"max": 11.74573023058474,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8429463100328576,
"min": 3.8429463100328576,
"max": 187.93168368935585,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1720850684",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/home/shinnigus/.local/bin/mlagents-learn /home/shinnigus/huggingface-deeprl/notebooks/unit5/ml-agents/config/ppo/PyramidsRND.yaml --env=/home/shinnigus/huggingface-deeprl/notebooks/unit5/training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1720852144"
},
"total": 1460.2105501530023,
"count": 1,
"self": 0.268867318998673,
"children": {
"run_training.setup": {
"total": 0.029221842996776104,
"count": 1,
"self": 0.029221842996776104
},
"TrainerController.start_learning": {
"total": 1459.9124609910068,
"count": 1,
"self": 0.9378263837861596,
"children": {
"TrainerController._reset_env": {
"total": 2.001388735996443,
"count": 1,
"self": 2.001388735996443
},
"TrainerController.advance": {
"total": 1456.9082653702208,
"count": 64228,
"self": 0.9659237306623254,
"children": {
"env_step": {
"total": 947.8234297667368,
"count": 64228,
"self": 726.5215206150315,
"children": {
"SubprocessEnvManager._take_step": {
"total": 220.68337140261428,
"count": 64228,
"self": 2.9355536661169026,
"children": {
"TorchPolicy.evaluate": {
"total": 217.74781773649738,
"count": 62539,
"self": 217.74781773649738
}
}
},
"workers": {
"total": 0.6185377490910469,
"count": 64228,
"self": 0.0,
"children": {
"worker_root": {
"total": 1457.5740660495794,
"count": 64228,
"is_parallel": true,
"self": 791.3476614686806,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016271640051854774,
"count": 1,
"is_parallel": true,
"self": 0.000812960002804175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008142040023813024,
"count": 8,
"is_parallel": true,
"self": 0.0008142040023813024
}
}
},
"UnityEnvironment.step": {
"total": 0.025172206995193847,
"count": 1,
"is_parallel": true,
"self": 0.00016871000116225332,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017386898980475962,
"count": 1,
"is_parallel": true,
"self": 0.00017386898980475962
},
"communicator.exchange": {
"total": 0.024359643997740932,
"count": 1,
"is_parallel": true,
"self": 0.024359643997740932
},
"steps_from_proto": {
"total": 0.0004699840064859018,
"count": 1,
"is_parallel": true,
"self": 0.0001287290215259418,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00034125498495996,
"count": 8,
"is_parallel": true,
"self": 0.00034125498495996
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 666.2264045808988,
"count": 64227,
"is_parallel": true,
"self": 10.684605578469927,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.350291012204252,
"count": 64227,
"is_parallel": true,
"self": 7.350291012204252
},
"communicator.exchange": {
"total": 620.4874629875703,
"count": 64227,
"is_parallel": true,
"self": 620.4874629875703
},
"steps_from_proto": {
"total": 27.70404500265431,
"count": 64227,
"is_parallel": true,
"self": 6.591111103669391,
"children": {
"_process_rank_one_or_two_observation": {
"total": 21.11293389898492,
"count": 513816,
"is_parallel": true,
"self": 21.11293389898492
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 508.1189118728216,
"count": 64228,
"self": 1.6844402537099086,
"children": {
"process_trajectory": {
"total": 95.22079032220063,
"count": 64228,
"self": 95.06898185019963,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1518084720009938,
"count": 2,
"self": 0.1518084720009938
}
}
},
"_update_policy": {
"total": 411.2136812969111,
"count": 457,
"self": 174.7665951193194,
"children": {
"TorchPPOOptimizer.update": {
"total": 236.4470861775917,
"count": 22812,
"self": 236.4470861775917
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.259935278445482e-07,
"count": 1,
"self": 5.259935278445482e-07
},
"TrainerController._save_models": {
"total": 0.06497997500991914,
"count": 1,
"self": 0.0007757390121696517,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06420423599774949,
"count": 1,
"self": 0.06420423599774949
}
}
}
}
}
}
}