{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7013018727302551,
"min": 0.6640027165412903,
"max": 0.8622682094573975,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6680.6015625,
"min": 6339.89794921875,
"max": 8294.6728515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 399984.0,
"min": 209952.0,
"max": 399984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 399984.0,
"min": 209952.0,
"max": 399984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.379910469055176,
"min": 12.89579963684082,
"max": 13.408547401428223,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2609.08251953125,
"min": 2372.8271484375,
"max": 2735.34375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0686802695690858,
"min": 0.06430996767263494,
"max": 0.0772170364093743,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2747210782763432,
"min": 0.2572398706905398,
"max": 0.3721021125777898,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17806019392960212,
"min": 0.1707822937591403,
"max": 0.2220527931171305,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7122407757184085,
"min": 0.6831291750365612,
"max": 1.1102639655856525,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.561098812999997e-06,
"min": 3.561098812999997e-06,
"max": 0.000145461051513,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.4244395251999988e-05,
"min": 1.4244395251999988e-05,
"max": 0.0006571802809399999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.101187,
"min": 0.101187,
"max": 0.148487,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.404748,
"min": 0.404748,
"max": 0.71906,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.923129999999995e-05,
"min": 6.923129999999995e-05,
"max": 0.0024295013000000007,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0002769251999999998,
"min": 0.0002769251999999998,
"max": 0.010981094,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.34090909090909,
"min": 25.327272727272728,
"max": 26.522727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1159.0,
"min": 1120.0,
"max": 1446.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.34090909090909,
"min": 25.327272727272728,
"max": 26.522727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1159.0,
"min": 1120.0,
"max": 1446.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676486508",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/home/jonatan/PycharmProjects/HuggingfaceDeepRLCourse/Unit5/venv/bin/mlagents-learn ./content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.23.5",
"end_time_seconds": "1676486828"
},
"total": 319.7264541609911,
"count": 1,
"self": 0.3750912419054657,
"children": {
"run_training.setup": {
"total": 0.01992227102164179,
"count": 1,
"self": 0.01992227102164179
},
"TrainerController.start_learning": {
"total": 319.331440648064,
"count": 1,
"self": 0.44656072894576937,
"children": {
"TrainerController._reset_env": {
"total": 2.602945677936077,
"count": 1,
"self": 2.602945677936077
},
"TrainerController.advance": {
"total": 316.15822794416454,
"count": 18140,
"self": 0.20747006044257432,
"children": {
"env_step": {
"total": 315.95075788372196,
"count": 18140,
"self": 215.02938846347388,
"children": {
"SubprocessEnvManager._take_step": {
"total": 100.67914684314746,
"count": 18140,
"self": 1.143365952768363,
"children": {
"TorchPolicy.evaluate": {
"total": 99.5357808903791,
"count": 18140,
"self": 24.05113153764978,
"children": {
"TorchPolicy.sample_actions": {
"total": 75.48464935272932,
"count": 18140,
"self": 75.48464935272932
}
}
}
}
},
"workers": {
"total": 0.24222257710061967,
"count": 18140,
"self": 0.0,
"children": {
"worker_root": {
"total": 318.66177946177777,
"count": 18140,
"is_parallel": true,
"self": 163.58133103558794,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0014973619254305959,
"count": 1,
"is_parallel": true,
"self": 0.00041328067891299725,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010840812465175986,
"count": 10,
"is_parallel": true,
"self": 0.0010840812465175986
}
}
},
"UnityEnvironment.step": {
"total": 0.022471531061455607,
"count": 1,
"is_parallel": true,
"self": 0.00029101502150297165,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020799902267754078,
"count": 1,
"is_parallel": true,
"self": 0.00020799902267754078
},
"communicator.exchange": {
"total": 0.021059047081507742,
"count": 1,
"is_parallel": true,
"self": 0.021059047081507742
},
"steps_from_proto": {
"total": 0.0009134699357673526,
"count": 1,
"is_parallel": true,
"self": 0.0002164661418646574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006970037939026952,
"count": 10,
"is_parallel": true,
"self": 0.0006970037939026952
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 155.08044842618983,
"count": 18139,
"is_parallel": true,
"self": 7.599670621799305,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.053588181035593,
"count": 18139,
"is_parallel": true,
"self": 4.053588181035593
},
"communicator.exchange": {
"total": 123.36690755712334,
"count": 18139,
"is_parallel": true,
"self": 123.36690755712334
},
"steps_from_proto": {
"total": 20.060282066231593,
"count": 18139,
"is_parallel": true,
"self": 4.2335486222291365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 15.826733444002457,
"count": 181390,
"is_parallel": true,
"self": 15.826733444002457
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.423898205161095e-05,
"count": 1,
"self": 7.423898205161095e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 314.5764184364816,
"count": 361499,
"is_parallel": true,
"self": 4.800364643684588,
"children": {
"process_trajectory": {
"total": 175.4521761490032,
"count": 361499,
"is_parallel": true,
"self": 174.722319738823,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7298564101802185,
"count": 4,
"is_parallel": true,
"self": 0.7298564101802185
}
}
},
"_update_policy": {
"total": 134.32387764379382,
"count": 90,
"is_parallel": true,
"self": 39.981511781108566,
"children": {
"TorchPPOOptimizer.update": {
"total": 94.34236586268526,
"count": 4587,
"is_parallel": true,
"self": 94.34236586268526
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1236320580355823,
"count": 1,
"self": 0.000865268986672163,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12276678904891014,
"count": 1,
"self": 0.12276678904891014
}
}
}
}
}
}
}