{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4052609205245972,
"min": 1.405237078666687,
"max": 1.4272716045379639,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71487.03125,
"min": 68226.4921875,
"max": 77929.2265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 103.28810020876827,
"min": 91.34750462107209,
"max": 378.5338345864662,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49475.0,
"min": 48982.0,
"max": 50345.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999965.0,
"min": 49884.0,
"max": 1999965.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999965.0,
"min": 49884.0,
"max": 1999965.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3451578617095947,
"min": 0.26431939005851746,
"max": 2.381958246231079,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1123.33056640625,
"min": 34.890159606933594,
"max": 1269.583740234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5064208889555086,
"min": 1.7049539116295902,
"max": 3.8355419635772705,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1679.5756058096886,
"min": 225.0539163351059,
"max": 2009.807716012001,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5064208889555086,
"min": 1.7049539116295902,
"max": 3.8355419635772705,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1679.5756058096886,
"min": 225.0539163351059,
"max": 2009.807716012001,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.013880670529761118,
"min": 0.012403521597540627,
"max": 0.021841133466659814,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.027761341059522236,
"min": 0.024807043195081254,
"max": 0.06317608293902595,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04488476297507683,
"min": 0.020105578067402045,
"max": 0.05795222253849109,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.08976952595015367,
"min": 0.04021115613480409,
"max": 0.16843547771374384,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.273823575425e-06,
"min": 4.273823575425e-06,
"max": 0.000295327726557425,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.54764715085e-06,
"min": 8.54764715085e-06,
"max": 0.0008443800185399997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10142457500000002,
"min": 0.10142457500000002,
"max": 0.19844257500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20284915000000003,
"min": 0.20284915000000003,
"max": 0.5814600000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.108629250000001e-05,
"min": 8.108629250000001e-05,
"max": 0.0049222844925,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016217258500000003,
"min": 0.00016217258500000003,
"max": 0.014074854000000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675751919",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675754655"
},
"total": 2735.6659800449997,
"count": 1,
"self": 0.7406236689998877,
"children": {
"run_training.setup": {
"total": 0.11954431399999521,
"count": 1,
"self": 0.11954431399999521
},
"TrainerController.start_learning": {
"total": 2734.805812062,
"count": 1,
"self": 5.137922469864861,
"children": {
"TrainerController._reset_env": {
"total": 11.107286851000026,
"count": 1,
"self": 11.107286851000026
},
"TrainerController.advance": {
"total": 2718.4196289161346,
"count": 231663,
"self": 5.316599241158656,
"children": {
"env_step": {
"total": 2118.616382736075,
"count": 231663,
"self": 1766.686924614044,
"children": {
"SubprocessEnvManager._take_step": {
"total": 348.58468683395085,
"count": 231663,
"self": 18.22460880600852,
"children": {
"TorchPolicy.evaluate": {
"total": 330.36007802794234,
"count": 223029,
"self": 81.47455989385344,
"children": {
"TorchPolicy.sample_actions": {
"total": 248.8855181340889,
"count": 223029,
"self": 248.8855181340889
}
}
}
}
},
"workers": {
"total": 3.34477128808021,
"count": 231663,
"self": 0.0,
"children": {
"worker_root": {
"total": 2724.661864466051,
"count": 231663,
"is_parallel": true,
"self": 1294.284258982139,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022358390001500084,
"count": 1,
"is_parallel": true,
"self": 0.0003682700003082573,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001867568999841751,
"count": 2,
"is_parallel": true,
"self": 0.001867568999841751
}
}
},
"UnityEnvironment.step": {
"total": 0.036623979999831136,
"count": 1,
"is_parallel": true,
"self": 0.00033556999983375135,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020495700005085382,
"count": 1,
"is_parallel": true,
"self": 0.00020495700005085382
},
"communicator.exchange": {
"total": 0.03520993099982661,
"count": 1,
"is_parallel": true,
"self": 0.03520993099982661
},
"steps_from_proto": {
"total": 0.0008735220001199195,
"count": 1,
"is_parallel": true,
"self": 0.0002872539998861612,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005862680002337584,
"count": 2,
"is_parallel": true,
"self": 0.0005862680002337584
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1430.3776054839122,
"count": 231662,
"is_parallel": true,
"self": 42.35871159697376,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 90.79975018805476,
"count": 231662,
"is_parallel": true,
"self": 90.79975018805476
},
"communicator.exchange": {
"total": 1187.0525866178793,
"count": 231662,
"is_parallel": true,
"self": 1187.0525866178793
},
"steps_from_proto": {
"total": 110.16655708100438,
"count": 231662,
"is_parallel": true,
"self": 44.77645356901439,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.39010351198999,
"count": 463324,
"is_parallel": true,
"self": 65.39010351198999
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 594.4866469389005,
"count": 231663,
"self": 8.49190968284256,
"children": {
"process_trajectory": {
"total": 180.13860233605715,
"count": 231663,
"self": 178.9146964910576,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2239058449995355,
"count": 10,
"self": 1.2239058449995355
}
}
},
"_update_policy": {
"total": 405.85613492000084,
"count": 96,
"self": 345.23522319599806,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.62091172400278,
"count": 2880,
"self": 60.62091172400278
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.40000063565094e-07,
"count": 1,
"self": 9.40000063565094e-07
},
"TrainerController._save_models": {
"total": 0.14097288500033756,
"count": 1,
"self": 0.0038351900002453476,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1371376950000922,
"count": 1,
"self": 0.1371376950000922
}
}
}
}
}
}
}