{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.405854344367981,
"min": 1.405854344367981,
"max": 1.4299639463424683,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71283.84375,
"min": 66524.75,
"max": 79295.3359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 94.35809523809523,
"min": 83.56587837837837,
"max": 394.79527559055117,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49538.0,
"min": 48883.0,
"max": 50202.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999963.0,
"min": 49703.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999963.0,
"min": 49703.0,
"max": 1999963.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3849222660064697,
"min": 0.0780317410826683,
"max": 2.4722304344177246,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1252.084228515625,
"min": 9.831999778747559,
"max": 1461.088134765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7214353832744416,
"min": 1.7672370809411246,
"max": 3.9556405436691393,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1953.7535762190819,
"min": 222.6718721985817,
"max": 2243.0415620207787,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7214353832744416,
"min": 1.7672370809411246,
"max": 3.9556405436691393,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1953.7535762190819,
"min": 222.6718721985817,
"max": 2243.0415620207787,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01681895739149897,
"min": 0.013829042985586662,
"max": 0.021993728312211008,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05045687217449692,
"min": 0.027658085971173325,
"max": 0.06598118493663302,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05071070376369688,
"min": 0.02157631960387031,
"max": 0.06358005950848261,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15213211129109064,
"min": 0.04315263920774062,
"max": 0.18544680600365002,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3507988831000067e-06,
"min": 3.3507988831000067e-06,
"max": 0.000295332676555775,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.005239664930002e-05,
"min": 1.005239664930002e-05,
"max": 0.0008437206187597999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111690000000001,
"min": 0.10111690000000001,
"max": 0.19844422500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30335070000000003,
"min": 0.20737965000000003,
"max": 0.5812401999999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.573331000000012e-05,
"min": 6.573331000000012e-05,
"max": 0.004922366827499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019719993000000036,
"min": 0.00019719993000000036,
"max": 0.01406388598,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719212339",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719214762"
},
"total": 2423.035788583,
"count": 1,
"self": 0.4849382490001517,
"children": {
"run_training.setup": {
"total": 0.07729734999998072,
"count": 1,
"self": 0.07729734999998072
},
"TrainerController.start_learning": {
"total": 2422.473552984,
"count": 1,
"self": 4.607422178921297,
"children": {
"TrainerController._reset_env": {
"total": 3.46483602100011,
"count": 1,
"self": 3.46483602100011
},
"TrainerController.advance": {
"total": 2414.275011804078,
"count": 232332,
"self": 4.749693419002597,
"children": {
"env_step": {
"total": 1912.2568005741498,
"count": 232332,
"self": 1578.613036452318,
"children": {
"SubprocessEnvManager._take_step": {
"total": 330.6855132398787,
"count": 232332,
"self": 19.054467131909405,
"children": {
"TorchPolicy.evaluate": {
"total": 311.63104610796927,
"count": 223048,
"self": 311.63104610796927
}
}
},
"workers": {
"total": 2.958250881953063,
"count": 232332,
"self": 0.0,
"children": {
"worker_root": {
"total": 2415.081887378035,
"count": 232332,
"is_parallel": true,
"self": 1148.8393417489876,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010930739999821526,
"count": 1,
"is_parallel": true,
"self": 0.0002698130001590471,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008232609998231055,
"count": 2,
"is_parallel": true,
"self": 0.0008232609998231055
}
}
},
"UnityEnvironment.step": {
"total": 0.030596848999948634,
"count": 1,
"is_parallel": true,
"self": 0.00038131399992380466,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019708699983311817,
"count": 1,
"is_parallel": true,
"self": 0.00019708699983311817
},
"communicator.exchange": {
"total": 0.029185209000161194,
"count": 1,
"is_parallel": true,
"self": 0.029185209000161194
},
"steps_from_proto": {
"total": 0.0008332390000305168,
"count": 1,
"is_parallel": true,
"self": 0.00024367400010305573,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000589564999927461,
"count": 2,
"is_parallel": true,
"self": 0.000589564999927461
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1266.2425456290473,
"count": 232331,
"is_parallel": true,
"self": 39.85811857989347,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.00870744109261,
"count": 232331,
"is_parallel": true,
"self": 80.00870744109261
},
"communicator.exchange": {
"total": 1054.6839809981038,
"count": 232331,
"is_parallel": true,
"self": 1054.6839809981038
},
"steps_from_proto": {
"total": 91.69173860995738,
"count": 232331,
"is_parallel": true,
"self": 32.165636502827056,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.52610210713033,
"count": 464662,
"is_parallel": true,
"self": 59.52610210713033
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 497.2685178109259,
"count": 232332,
"self": 7.351680750984542,
"children": {
"process_trajectory": {
"total": 153.28513063294417,
"count": 232332,
"self": 152.05464399694438,
"children": {
"RLTrainer._checkpoint": {
"total": 1.230486635999796,
"count": 10,
"self": 1.230486635999796
}
}
},
"_update_policy": {
"total": 336.6317064269972,
"count": 97,
"self": 271.0931422049946,
"children": {
"TorchPPOOptimizer.update": {
"total": 65.5385642220026,
"count": 2910,
"self": 65.5385642220026
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0410003596916795e-06,
"count": 1,
"self": 1.0410003596916795e-06
},
"TrainerController._save_models": {
"total": 0.12628193900036422,
"count": 1,
"self": 0.0026793010006258555,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12360263799973836,
"count": 1,
"self": 0.12360263799973836
}
}
}
}
}
}
}