{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3569077253341675,
"min": 0.353641539812088,
"max": 1.3381640911102295,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10821.4423828125,
"min": 10767.2158203125,
"max": 40594.546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6040346026420593,
"min": -0.0947328582406044,
"max": 0.6040346026420593,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 170.94178771972656,
"min": -22.735885620117188,
"max": 170.94178771972656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.011338387615978718,
"min": -0.011338387615978718,
"max": 0.37206146121025085,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.208763599395752,
"min": -3.208763599395752,
"max": 89.29475402832031,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0684448701226973,
"min": 0.0647062408044638,
"max": 0.07488378697195278,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9582281817177621,
"min": 0.49027942328100976,
"max": 1.0676154351608336,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01594100260574903,
"min": 0.00027356665512712344,
"max": 0.017131213958020367,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22317403648048642,
"min": 0.002492218989659548,
"max": 0.24483982914049796,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.569118905564283e-06,
"min": 7.569118905564283e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010596766467789996,
"min": 0.00010596766467789996,
"max": 0.0033821369726211,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252300714285714,
"min": 0.10252300714285714,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353221,
"min": 1.3691136000000002,
"max": 2.5273789000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002620484135714285,
"min": 0.0002620484135714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003668677789999999,
"min": 0.003668677789999999,
"max": 0.11276515210999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013837912119925022,
"min": 0.013404861092567444,
"max": 0.4772012233734131,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19373077154159546,
"min": 0.1876680552959442,
"max": 3.3404085636138916,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 304.60215053763443,
"min": 304.60215053763443,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28328.0,
"min": 15984.0,
"max": 32569.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6523827820695856,
"min": -1.0000000521540642,
"max": 1.6523827820695856,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 153.67159873247147,
"min": -32.000001668930054,
"max": 153.67159873247147,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6523827820695856,
"min": -1.0000000521540642,
"max": 1.6523827820695856,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 153.67159873247147,
"min": -32.000001668930054,
"max": 153.67159873247147,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.044104576915576406,
"min": 0.044104576915576406,
"max": 10.104298051446676,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.1017256531486055,
"min": 3.976658143365057,
"max": 161.66876882314682,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697340876",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697343081"
},
"total": 2205.594899757,
"count": 1,
"self": 0.4770632079994357,
"children": {
"run_training.setup": {
"total": 0.05746492700006911,
"count": 1,
"self": 0.05746492700006911
},
"TrainerController.start_learning": {
"total": 2205.0603716220003,
"count": 1,
"self": 1.481253010038472,
"children": {
"TrainerController._reset_env": {
"total": 7.950015220999944,
"count": 1,
"self": 7.950015220999944
},
"TrainerController.advance": {
"total": 2195.5527977349616,
"count": 63720,
"self": 1.51203750086961,
"children": {
"env_step": {
"total": 1590.94682393503,
"count": 63720,
"self": 1459.2588155339736,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.8028642930151,
"count": 63720,
"self": 4.877443795023282,
"children": {
"TorchPolicy.evaluate": {
"total": 125.92542049799181,
"count": 62556,
"self": 125.92542049799181
}
}
},
"workers": {
"total": 0.8851441080413451,
"count": 63720,
"self": 0.0,
"children": {
"worker_root": {
"total": 2200.3558242529784,
"count": 63720,
"is_parallel": true,
"self": 860.6648115969888,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.009738329000015256,
"count": 1,
"is_parallel": true,
"self": 0.007666976000109571,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020713529999056846,
"count": 8,
"is_parallel": true,
"self": 0.0020713529999056846
}
}
},
"UnityEnvironment.step": {
"total": 0.05150691200003621,
"count": 1,
"is_parallel": true,
"self": 0.0006903010000769427,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005785019999393626,
"count": 1,
"is_parallel": true,
"self": 0.0005785019999393626
},
"communicator.exchange": {
"total": 0.04835521199993309,
"count": 1,
"is_parallel": true,
"self": 0.04835521199993309
},
"steps_from_proto": {
"total": 0.001882897000086814,
"count": 1,
"is_parallel": true,
"self": 0.00043063300006451755,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014522640000222964,
"count": 8,
"is_parallel": true,
"self": 0.0014522640000222964
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1339.6910126559897,
"count": 63719,
"is_parallel": true,
"self": 35.40073421087436,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.159625845027904,
"count": 63719,
"is_parallel": true,
"self": 24.159625845027904
},
"communicator.exchange": {
"total": 1181.090253827042,
"count": 63719,
"is_parallel": true,
"self": 1181.090253827042
},
"steps_from_proto": {
"total": 99.04039877304535,
"count": 63719,
"is_parallel": true,
"self": 20.356051303040545,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.68434747000481,
"count": 509752,
"is_parallel": true,
"self": 78.68434747000481
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 603.0939362990621,
"count": 63720,
"self": 2.716116205059734,
"children": {
"process_trajectory": {
"total": 117.26751319999687,
"count": 63720,
"self": 117.03234854499681,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23516465500006234,
"count": 2,
"self": 0.23516465500006234
}
}
},
"_update_policy": {
"total": 483.1103068940055,
"count": 441,
"self": 288.7499267229921,
"children": {
"TorchPPOOptimizer.update": {
"total": 194.3603801710134,
"count": 22779,
"self": 194.3603801710134
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1050001376133878e-06,
"count": 1,
"self": 1.1050001376133878e-06
},
"TrainerController._save_models": {
"total": 0.07630455100024847,
"count": 1,
"self": 0.001465527000164002,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07483902400008446,
"count": 1,
"self": 0.07483902400008446
}
}
}
}
}
}
}