{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8599633574485779,
"min": 0.8599633574485779,
"max": 0.8599633574485779,
"count": 1
},
"Pyramids.Policy.Entropy.sum": {
"value": 15520.619140625,
"min": 15520.619140625,
"max": 15520.619140625,
"count": 1
},
"Pyramids.Step.mean": {
"value": 89984.0,
"min": 89984.0,
"max": 89984.0,
"count": 1
},
"Pyramids.Step.sum": {
"value": 89984.0,
"min": 89984.0,
"max": 89984.0,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10127768665552139,
"min": -0.10127768665552139,
"max": -0.10127768665552139,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -14.381431579589844,
"min": -14.381431579589844,
"max": -14.381431579589844,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.17203567922115326,
"min": 0.17203567922115326,
"max": 0.17203567922115326,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 24.429065704345703,
"min": 24.429065704345703,
"max": 24.429065704345703,
"count": 1
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06334216419012613,
"min": 0.06334216419012613,
"max": 0.06334216419012613,
"count": 1
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.2533686567605045,
"min": 0.2533686567605045,
"max": 0.2533686567605045,
"count": 1
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00018090784972408638,
"min": 0.00018090784972408638,
"max": 0.00018090784972408638,
"count": 1
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0007236313988963455,
"min": 0.0007236313988963455,
"max": 0.0007236313988963455,
"count": 1
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.2992082335999994e-05,
"min": 5.2992082335999994e-05,
"max": 5.2992082335999994e-05,
"count": 1
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00021196832934399998,
"min": 0.00021196832934399998,
"max": 0.00021196832934399998,
"count": 1
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.11766400000000002,
"min": 0.11766400000000002,
"max": 0.11766400000000002,
"count": 1
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.4706560000000001,
"min": 0.4706560000000001,
"max": 0.4706560000000001,
"count": 1
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0017746336000000003,
"min": 0.0017746336000000003,
"max": 0.0017746336000000003,
"count": 1
},
"Pyramids.Policy.Beta.sum": {
"value": 0.007098534400000001,
"min": 0.007098534400000001,
"max": 0.007098534400000001,
"count": 1
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.10569269955158234,
"min": 0.10569269955158234,
"max": 0.10569269955158234,
"count": 1
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.42277079820632935,
"min": 0.42277079820632935,
"max": 0.42277079820632935,
"count": 1
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 1
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 15984.0,
"min": 15984.0,
"max": 15984.0,
"count": 1
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.1135634500533342,
"min": 1.1135634500533342,
"max": 1.1135634500533342,
"count": 1
},
"Pyramids.Policy.RndReward.sum": {
"value": 17.817015200853348,
"min": 17.817015200853348,
"max": 17.817015200853348,
"count": 1
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1729485907",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1729485971"
},
"total": 64.39221033100011,
"count": 1,
"self": 0.846474371000113,
"children": {
"run_training.setup": {
"total": 0.05253644699996585,
"count": 1,
"self": 0.05253644699996585
},
"TrainerController.start_learning": {
"total": 63.493199513000036,
"count": 1,
"self": 0.03421665900100379,
"children": {
"TrainerController._reset_env": {
"total": 1.8634459940001307,
"count": 1,
"self": 1.8634459940001307
},
"TrainerController.advance": {
"total": 61.414933784998766,
"count": 1768,
"self": 0.03682404100254644,
"children": {
"env_step": {
"total": 41.88220661299761,
"count": 1768,
"self": 37.45286929799977,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4.406817632000866,
"count": 1768,
"self": 0.12678290500116418,
"children": {
"TorchPolicy.evaluate": {
"total": 4.280034726999702,
"count": 1768,
"self": 4.280034726999702
}
}
},
"workers": {
"total": 0.022519682996971824,
"count": 1768,
"self": 0.0,
"children": {
"worker_root": {
"total": 62.942129577001424,
"count": 1768,
"is_parallel": true,
"self": 28.6303863380017,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019833699998343945,
"count": 1,
"is_parallel": true,
"self": 0.0006671329997516295,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001316237000082765,
"count": 8,
"is_parallel": true,
"self": 0.001316237000082765
}
}
},
"UnityEnvironment.step": {
"total": 0.05535886300003767,
"count": 1,
"is_parallel": true,
"self": 0.0006598769996344345,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004526940001596813,
"count": 1,
"is_parallel": true,
"self": 0.0004526940001596813
},
"communicator.exchange": {
"total": 0.052558122000164076,
"count": 1,
"is_parallel": true,
"self": 0.052558122000164076
},
"steps_from_proto": {
"total": 0.0016881700000794808,
"count": 1,
"is_parallel": true,
"self": 0.00034941400053867255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013387559995408083,
"count": 8,
"is_parallel": true,
"self": 0.0013387559995408083
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 34.311743238999725,
"count": 1767,
"is_parallel": true,
"self": 0.9216294459997698,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.6539582459990925,
"count": 1767,
"is_parallel": true,
"self": 0.6539582459990925
},
"communicator.exchange": {
"total": 30.09883404499851,
"count": 1767,
"is_parallel": true,
"self": 30.09883404499851
},
"steps_from_proto": {
"total": 2.637321502002351,
"count": 1767,
"is_parallel": true,
"self": 0.5235672419960338,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.113754260006317,
"count": 14136,
"is_parallel": true,
"self": 2.113754260006317
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 19.495903130998613,
"count": 1768,
"self": 0.048150782994298424,
"children": {
"process_trajectory": {
"total": 3.7887640780043057,
"count": 1768,
"self": 3.7887640780043057
},
"_update_policy": {
"total": 15.658988270000009,
"count": 7,
"self": 8.391535989999056,
"children": {
"TorchPPOOptimizer.update": {
"total": 7.267452280000953,
"count": 615,
"self": 7.267452280000953
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.9060000795434462e-06,
"count": 1,
"self": 1.9060000795434462e-06
},
"TrainerController._save_models": {
"total": 0.18060116900005596,
"count": 1,
"self": 0.0040906670001277234,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17651050199992824,
"count": 1,
"self": 0.17651050199992824
}
}
}
}
}
}
}