ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4073126316070557,
"min": 1.4073126316070557,
"max": 1.4307005405426025,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71174.8359375,
"min": 66294.8125,
"max": 75377.7578125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.26885245901639,
"min": 81.26885245901639,
"max": 409.38524590163934,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49574.0,
"min": 48817.0,
"max": 50174.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999993.0,
"min": 49317.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999993.0,
"min": 49317.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.422433376312256,
"min": 0.02333320491015911,
"max": 2.5112802982330322,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1477.684326171875,
"min": 2.823317766189575,
"max": 1493.2392578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7483108170696946,
"min": 1.8085485976343312,
"max": 4.017417683320887,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2286.4695984125137,
"min": 218.83438031375408,
"max": 2359.354767739773,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7483108170696946,
"min": 1.8085485976343312,
"max": 4.017417683320887,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2286.4695984125137,
"min": 218.83438031375408,
"max": 2359.354767739773,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01620290673066241,
"min": 0.014207879180644846,
"max": 0.021213491920692225,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.048608720191987226,
"min": 0.02899081624527753,
"max": 0.05680395646268152,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05992192228635152,
"min": 0.021966475641561883,
"max": 0.0676796176367336,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17976576685905457,
"min": 0.04402433546880881,
"max": 0.20303885291020077,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3056488981500005e-06,
"min": 3.3056488981500005e-06,
"max": 0.0002953379265540249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.916946694450002e-06,
"min": 9.916946694450002e-06,
"max": 0.00084386416871195,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10110184999999998,
"min": 0.10110184999999998,
"max": 0.19844597500000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033055499999999,
"min": 0.207344,
"max": 0.5812880500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.498231500000002e-05,
"min": 6.498231500000002e-05,
"max": 0.0049224541525,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019494694500000006,
"min": 0.00019494694500000006,
"max": 0.014066273694999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674963445",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674965588"
},
"total": 2142.900144924,
"count": 1,
"self": 0.39659713399987595,
"children": {
"run_training.setup": {
"total": 0.10377444900007049,
"count": 1,
"self": 0.10377444900007049
},
"TrainerController.start_learning": {
"total": 2142.399773341,
"count": 1,
"self": 3.6642612699720303,
"children": {
"TrainerController._reset_env": {
"total": 9.946715906000009,
"count": 1,
"self": 9.946715906000009
},
"TrainerController.advance": {
"total": 2128.6819054670277,
"count": 232573,
"self": 4.038634869079942,
"children": {
"env_step": {
"total": 1678.6634199930388,
"count": 232573,
"self": 1410.9302134732366,
"children": {
"SubprocessEnvManager._take_step": {
"total": 265.2022251228826,
"count": 232573,
"self": 14.074211958874685,
"children": {
"TorchPolicy.evaluate": {
"total": 251.12801316400794,
"count": 223042,
"self": 63.50124038491208,
"children": {
"TorchPolicy.sample_actions": {
"total": 187.62677277909586,
"count": 223042,
"self": 187.62677277909586
}
}
}
}
},
"workers": {
"total": 2.5309813969196284,
"count": 232573,
"self": 0.0,
"children": {
"worker_root": {
"total": 2134.877249674918,
"count": 232573,
"is_parallel": true,
"self": 969.6525419420584,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003676735999988523,
"count": 1,
"is_parallel": true,
"self": 0.0003635689998873204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0033131670001012026,
"count": 2,
"is_parallel": true,
"self": 0.0033131670001012026
}
}
},
"UnityEnvironment.step": {
"total": 0.026575753999964036,
"count": 1,
"is_parallel": true,
"self": 0.00027249100003245985,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020895199997994496,
"count": 1,
"is_parallel": true,
"self": 0.00020895199997994496
},
"communicator.exchange": {
"total": 0.02521070399996006,
"count": 1,
"is_parallel": true,
"self": 0.02521070399996006
},
"steps_from_proto": {
"total": 0.0008836069999915708,
"count": 1,
"is_parallel": true,
"self": 0.00024181800006317644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006417889999283943,
"count": 2,
"is_parallel": true,
"self": 0.0006417889999283943
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1165.2247077328595,
"count": 232572,
"is_parallel": true,
"self": 33.47169618475459,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.26137300298228,
"count": 232572,
"is_parallel": true,
"self": 73.26137300298228
},
"communicator.exchange": {
"total": 969.5332721990196,
"count": 232572,
"is_parallel": true,
"self": 969.5332721990196
},
"steps_from_proto": {
"total": 88.95836634610293,
"count": 232572,
"is_parallel": true,
"self": 36.699969083981046,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.25839726212189,
"count": 465144,
"is_parallel": true,
"self": 52.25839726212189
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 445.97985060490896,
"count": 232573,
"self": 5.906112916011807,
"children": {
"process_trajectory": {
"total": 140.90502452489693,
"count": 232573,
"self": 139.74755231289714,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1574722119997887,
"count": 10,
"self": 1.1574722119997887
}
}
},
"_update_policy": {
"total": 299.1687131640002,
"count": 97,
"self": 246.76968617700481,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.39902698699541,
"count": 2910,
"self": 52.39902698699541
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.82000074145617e-07,
"count": 1,
"self": 9.82000074145617e-07
},
"TrainerController._save_models": {
"total": 0.10688971600029618,
"count": 1,
"self": 0.0027251780002188752,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1041645380000773,
"count": 1,
"self": 0.1041645380000773
}
}
}
}
}
}
}
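
The block below is a minimal, illustrative sketch and is not part of the original timers.json: assuming the file sits at run_logs/timers.json relative to the working directory, it shows one way to load it with Python's json module, print each gauge's value/min/max, and walk the nested wall-clock timer tree.

# Minimal sketch (not part of the original file): load this ML-Agents
# run_logs/timers.json and print its gauges and timer hierarchy.
# The relative path below is an assumption; point it at this file's location.
import json

def walk_timers(node, name="root", depth=0, min_total=1.0):
    # Recursively print the timer tree, skipping nodes under min_total seconds.
    total = node.get("total", 0.0)
    if total >= min_total:
        print(f"{'  ' * depth}{name}: {total:.2f}s across {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk_timers(child, child_name, depth + 1, min_total)

with open("run_logs/timers.json") as f:
    data = json.load(f)

# Each gauge records the latest value plus min/max/count, e.g.
# Huggy.Environment.CumulativeReward.mean peaking at ~4.02 over 40 summaries.
for metric, gauge in data["gauges"].items():
    print(f"{metric}: value={gauge['value']:g} min={gauge['min']:g} max={gauge['max']:g}")

# The remainder of the file is a nested timer tree rooted at "root".
walk_timers(data)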