ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.5100663900375366,
"min": 1.5100663900375366,
"max": 1.5100663900375366,
"count": 1
},
"Pyramids.Policy.Entropy.sum": {
"value": 45809.375,
"min": 45809.375,
"max": 45809.375,
"count": 1
},
"Pyramids.Step.mean": {
"value": 29939.0,
"min": 29939.0,
"max": 29939.0,
"count": 1
},
"Pyramids.Step.sum": {
"value": 29939.0,
"min": 29939.0,
"max": 29939.0,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2545505166053772,
"min": 0.2545505166053772,
"max": 0.2545505166053772,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 60.32847595214844,
"min": 60.32847595214844,
"max": 60.32847595214844,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.017553579062223434,
"min": 0.017553579062223434,
"max": 0.017553579062223434,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.160198211669922,
"min": 4.160198211669922,
"max": 4.160198211669922,
"count": 1
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07304438867397589,
"min": 0.07304438867397589,
"max": 0.07304438867397589,
"count": 1
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.5843551093918071,
"min": 0.5843551093918071,
"max": 0.5843551093918071,
"count": 1
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007217467072763173,
"min": 0.007217467072763173,
"max": 0.007217467072763173,
"count": 1
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.057739736582105385,
"min": 0.057739736582105385,
"max": 0.057739736582105385,
"count": 1
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00019682928439025,
"min": 0.00019682928439025,
"max": 0.00019682928439025,
"count": 1
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.001574634275122,
"min": 0.001574634275122,
"max": 0.001574634275122,
"count": 1
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16560975,
"min": 0.16560975,
"max": 0.16560975,
"count": 1
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.324878,
"min": 1.324878,
"max": 1.324878,
"count": 1
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0065644140250000005,
"min": 0.0065644140250000005,
"max": 0.0065644140250000005,
"count": 1
},
"Pyramids.Policy.Beta.sum": {
"value": 0.052515312200000004,
"min": 0.052515312200000004,
"max": 0.052515312200000004,
"count": 1
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.410295695066452,
"min": 0.410295695066452,
"max": 0.410295695066452,
"count": 1
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 3.282365560531616,
"min": 3.282365560531616,
"max": 3.282365560531616,
"count": 1
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 962.0,
"min": 962.0,
"max": 962.0,
"count": 1
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 16354.0,
"min": 16354.0,
"max": 16354.0,
"count": 1
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8452941652606515,
"min": -0.8452941652606515,
"max": -0.8452941652606515,
"count": 1
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -14.370000809431076,
"min": -14.370000809431076,
"max": -14.370000809431076,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8452941652606515,
"min": -0.8452941652606515,
"max": -0.8452941652606515,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -14.370000809431076,
"min": -14.370000809431076,
"max": -14.370000809431076,
"count": 1
},
"Pyramids.Policy.RndReward.mean": {
"value": 8.134048356729394,
"min": 8.134048356729394,
"max": 8.134048356729394,
"count": 1
},
"Pyramids.Policy.RndReward.sum": {
"value": 138.27882206439972,
"min": 138.27882206439972,
"max": 138.27882206439972,
"count": 1
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698141144",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/PyramidsRND.yaml --env=/content/drive/MyDrive/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698141291"
},
"total": 147.26513554999997,
"count": 1,
"self": 2.3183254559999114,
"children": {
"run_training.setup": {
"total": 0.0765699759999734,
"count": 1,
"self": 0.0765699759999734
},
"TrainerController.start_learning": {
"total": 144.87024011800008,
"count": 1,
"self": 0.09729295999659371,
"children": {
"TrainerController._reset_env": {
"total": 12.099789012999963,
"count": 1,
"self": 12.099789012999963
},
"TrainerController.advance": {
"total": 132.38314315900334,
"count": 3154,
"self": 0.1473327560040616,
"children": {
"env_step": {
"total": 77.62382913999431,
"count": 3154,
"self": 63.427304574994196,
"children": {
"SubprocessEnvManager._take_step": {
"total": 14.144620270001496,
"count": 3154,
"self": 0.36987558801183695,
"children": {
"TorchPolicy.evaluate": {
"total": 13.77474468198966,
"count": 3151,
"self": 13.77474468198966
}
}
},
"workers": {
"total": 0.05190429499862148,
"count": 3154,
"self": 0.0,
"children": {
"worker_root": {
"total": 144.40407803100857,
"count": 3154,
"is_parallel": true,
"self": 89.30040496500214,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006318687999964823,
"count": 1,
"is_parallel": true,
"self": 0.004410549000567698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019081389993971243,
"count": 8,
"is_parallel": true,
"self": 0.0019081389993971243
}
}
},
"UnityEnvironment.step": {
"total": 0.08122985299996799,
"count": 1,
"is_parallel": true,
"self": 0.0005755100000897073,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000468697000087559,
"count": 1,
"is_parallel": true,
"self": 0.000468697000087559
},
"communicator.exchange": {
"total": 0.07822250699996403,
"count": 1,
"is_parallel": true,
"self": 0.07822250699996403
},
"steps_from_proto": {
"total": 0.001963138999826697,
"count": 1,
"is_parallel": true,
"self": 0.00047541499998260406,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014877239998440928,
"count": 8,
"is_parallel": true,
"self": 0.0014877239998440928
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 55.10367306600642,
"count": 3153,
"is_parallel": true,
"self": 1.685044303010045,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.108648101992685,
"count": 3153,
"is_parallel": true,
"self": 1.108648101992685
},
"communicator.exchange": {
"total": 47.39334769800007,
"count": 3153,
"is_parallel": true,
"self": 47.39334769800007
},
"steps_from_proto": {
"total": 4.916632963003622,
"count": 3153,
"is_parallel": true,
"self": 1.0095110529980502,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3.9071219100055714,
"count": 25224,
"is_parallel": true,
"self": 3.9071219100055714
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 54.611981263004964,
"count": 3154,
"self": 0.18450169900597757,
"children": {
"process_trajectory": {
"total": 10.919611246999239,
"count": 3154,
"self": 10.919611246999239
},
"_update_policy": {
"total": 43.50786831699975,
"count": 16,
"self": 26.454871815004026,
"children": {
"TorchPPOOptimizer.update": {
"total": 17.05299650199572,
"count": 1161,
"self": 17.05299650199572
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3350002063816646e-06,
"count": 1,
"self": 1.3350002063816646e-06
},
"TrainerController._save_models": {
"total": 0.2900136509999811,
"count": 1,
"self": 0.002631371000006766,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28738227999997434,
"count": 1,
"self": 0.28738227999997434
}
}
}
}
}
}
}
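
Note: this file is the timer/gauge dump that mlagents-learn writes at the end of a run. As a minimal sketch (assuming the file is available locally at the hypothetical path "run_logs/timers.json"), the gauges and the nested timer tree above can be inspected with plain Python:

import json

# Load the ML-Agents timer dump (the path here is an assumption; point it at your run directory).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge entry carries value/min/max/count, as in the "gauges" block above.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} (count={gauge['count']})")

# Recursively walk the timer hierarchy, printing total seconds and call counts per node.
def walk(name, node, depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    parallel = " [parallel]" if node.get("is_parallel") else ""
    print(f"{'  ' * depth}{name}: {total:.3f}s over {count} call(s){parallel}")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(timers.get("name", "root"), timers)

This only reads the keys that actually appear in the file ("gauges", "total", "count", "self", "children", "is_parallel"); for example, it would show that most of the ~147 s total was spent under TrainerController.advance, split between env_step and trainer_advance.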