{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.403439998626709,
"min": 1.403439998626709,
"max": 1.4244681596755981,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70066.7421875,
"min": 68974.9375,
"max": 77271.9453125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 70.90647482014388,
"min": 70.90647482014388,
"max": 406.38709677419354,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49280.0,
"min": 48769.0,
"max": 50392.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999945.0,
"min": 49901.0,
"max": 1999945.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999945.0,
"min": 49901.0,
"max": 1999945.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.505805730819702,
"min": 0.048853449523448944,
"max": 2.584773063659668,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1741.5350341796875,
"min": 6.008974075317383,
"max": 1741.5350341796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8617424060114853,
"min": 1.8565165886307151,
"max": 4.090605696740041,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2683.9109721779823,
"min": 228.35154040157795,
"max": 2689.3197382688522,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8617424060114853,
"min": 1.8565165886307151,
"max": 4.090605696740041,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2683.9109721779823,
"min": 228.35154040157795,
"max": 2689.3197382688522,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017147809871676144,
"min": 0.011161911057327719,
"max": 0.019129289959043186,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05144342961502844,
"min": 0.022323822114655438,
"max": 0.05518393944560861,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06017634636825986,
"min": 0.021711290813982487,
"max": 0.06638118258366982,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18052903910477958,
"min": 0.04342258162796497,
"max": 0.1871425696959098,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.851548716183334e-06,
"min": 3.851548716183334e-06,
"max": 0.000295354276548575,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1554646148550001e-05,
"min": 1.1554646148550001e-05,
"max": 0.0008440179186606997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128381666666665,
"min": 0.10128381666666665,
"max": 0.19845142500000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30385144999999997,
"min": 0.20769945000000006,
"max": 0.5813393,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.406245166666667e-05,
"min": 7.406245166666667e-05,
"max": 0.0049227261075,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.000222187355,
"min": 0.000222187355,
"max": 0.014068831070000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677489096",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677491556"
},
"total": 2459.524642706,
"count": 1,
"self": 0.4438703280002301,
"children": {
"run_training.setup": {
"total": 0.11794571399991582,
"count": 1,
"self": 0.11794571399991582
},
"TrainerController.start_learning": {
"total": 2458.962826664,
"count": 1,
"self": 4.512183997808279,
"children": {
"TrainerController._reset_env": {
"total": 10.2040178929999,
"count": 1,
"self": 10.2040178929999
},
"TrainerController.advance": {
"total": 2444.1327445681923,
"count": 233501,
"self": 4.5674677113070175,
"children": {
"env_step": {
"total": 1899.5905139060665,
"count": 233501,
"self": 1591.7194520769267,
"children": {
"SubprocessEnvManager._take_step": {
"total": 304.8797597591499,
"count": 233501,
"self": 15.881425741083149,
"children": {
"TorchPolicy.evaluate": {
"total": 288.99833401806677,
"count": 222906,
"self": 73.10165056308233,
"children": {
"TorchPolicy.sample_actions": {
"total": 215.89668345498444,
"count": 222906,
"self": 215.89668345498444
}
}
}
}
},
"workers": {
"total": 2.9913020699898425,
"count": 233501,
"self": 0.0,
"children": {
"worker_root": {
"total": 2450.6087872069656,
"count": 233501,
"is_parallel": true,
"self": 1151.904158835017,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009485929999755172,
"count": 1,
"is_parallel": true,
"self": 0.00031145399998422363,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006371389999912935,
"count": 2,
"is_parallel": true,
"self": 0.0006371389999912935
}
}
},
"UnityEnvironment.step": {
"total": 0.0293614479999178,
"count": 1,
"is_parallel": true,
"self": 0.0003073429998039501,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041190199999618926,
"count": 1,
"is_parallel": true,
"self": 0.00041190199999618926
},
"communicator.exchange": {
"total": 0.027920846999904825,
"count": 1,
"is_parallel": true,
"self": 0.027920846999904825
},
"steps_from_proto": {
"total": 0.0007213560002128361,
"count": 1,
"is_parallel": true,
"self": 0.00025420600036341057,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004671499998494255,
"count": 2,
"is_parallel": true,
"self": 0.0004671499998494255
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1298.7046283719487,
"count": 233500,
"is_parallel": true,
"self": 39.992361281921376,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.12472420894323,
"count": 233500,
"is_parallel": true,
"self": 81.12472420894323
},
"communicator.exchange": {
"total": 1083.3870804390278,
"count": 233500,
"is_parallel": true,
"self": 1083.3870804390278
},
"steps_from_proto": {
"total": 94.20046244205628,
"count": 233500,
"is_parallel": true,
"self": 38.0582799189649,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.14218252309138,
"count": 467000,
"is_parallel": true,
"self": 56.14218252309138
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 539.974762950819,
"count": 233501,
"self": 6.634980813766106,
"children": {
"process_trajectory": {
"total": 171.55158689505447,
"count": 233501,
"self": 170.25979737905436,
"children": {
"RLTrainer._checkpoint": {
"total": 1.291789516000108,
"count": 10,
"self": 1.291789516000108
}
}
},
"_update_policy": {
"total": 361.78819524199844,
"count": 97,
"self": 302.7442654520048,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.043929789993626,
"count": 2910,
"self": 59.043929789993626
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0270005077472888e-06,
"count": 1,
"self": 1.0270005077472888e-06
},
"TrainerController._save_models": {
"total": 0.113879177999479,
"count": 1,
"self": 0.0020443139992494253,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11183486400022957,
"count": 1,
"self": 0.11183486400022957
}
}
}
}
}
}
}