{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.30226588249206543,
"min": 0.2905525863170624,
"max": 0.48415157198905945,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9043.794921875,
"min": 8693.3330078125,
"max": 14447.771484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 1979893.0,
"min": 1019901.0,
"max": 1979893.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 1979893.0,
"min": 1019901.0,
"max": 1979893.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6581579446792603,
"min": 0.10869690775871277,
"max": 0.6835697889328003,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 184.9423828125,
"min": 24.538314819335938,
"max": 197.91285705566406,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05591478571295738,
"min": -0.5486251711845398,
"max": 0.6454031467437744,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 15.712055206298828,
"min": -149.22604370117188,
"max": 179.4220733642578,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 291.6868686868687,
"min": 273.9642857142857,
"max": 716.9,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28877.0,
"min": 15408.0,
"max": 32226.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6867179843783378,
"min": 0.5828799590468406,
"max": 1.7276306189932265,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 168.6717984378338,
"min": 18.585199385881424,
"max": 191.76699870824814,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6867179843783378,
"min": 0.5828799590468406,
"max": 1.7276306189932265,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 168.6717984378338,
"min": 18.585199385881424,
"max": 191.76699870824814,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08323891130188713,
"min": 0.07760038152614512,
"max": 0.38253927688783734,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 8.323891130188713,
"min": 8.065886851632968,
"max": 15.301571075513493,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.030638242301730706,
"min": 0.02839826929682334,
"max": 0.03363850301996405,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.4289353922242299,
"min": 0.3010105590987951,
"max": 0.49610404586419454,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01592927757717137,
"min": 0.00933230190874181,
"max": 0.07634348353991904,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22300988608039915,
"min": 0.08399071717867629,
"max": 1.1451522530987857,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.261262531992852e-06,
"min": 5.261262531992852e-06,
"max": 0.00014838705053766665,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 7.365767544789993e-05,
"min": 7.365767544789993e-05,
"max": 0.0020256206747933498,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10175372142857142,
"min": 0.10175372142857142,
"max": 0.14946233333333336,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4245520999999999,
"min": 1.3451610000000003,
"max": 2.11133085,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00018519677071428552,
"min": 0.00018519677071428552,
"max": 0.004951287100000001,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0025927547899999973,
"min": 0.0025927547899999973,
"max": 0.067593144335,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.027942849323153496,
"min": 0.027649715542793274,
"max": 0.052572958171367645,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.39119988679885864,
"min": 0.38709601759910583,
"max": 0.7247375845909119,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675182632",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn --resume ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675184765"
},
"total": 2133.003628408,
"count": 1,
"self": 0.4751166099999864,
"children": {
"run_training.setup": {
"total": 0.09911593200013158,
"count": 1,
"self": 0.09911593200013158
},
"TrainerController.start_learning": {
"total": 2132.429395866,
"count": 1,
"self": 1.1965176899648213,
"children": {
"TrainerController._reset_env": {
"total": 6.112986608000028,
"count": 1,
"self": 6.112986608000028
},
"TrainerController.advance": {
"total": 2125.0332825380347,
"count": 64531,
"self": 1.3073357259436307,
"children": {
"env_step": {
"total": 1422.273043184955,
"count": 64531,
"self": 1322.065725131898,
"children": {
"SubprocessEnvManager._take_step": {
"total": 99.47658951301219,
"count": 64531,
"self": 4.177412174096389,
"children": {
"TorchPolicy.evaluate": {
"total": 95.2991773389158,
"count": 62544,
"self": 32.42140378492786,
"children": {
"TorchPolicy.sample_actions": {
"total": 62.877773553987936,
"count": 62544,
"self": 62.877773553987936
}
}
}
}
},
"workers": {
"total": 0.7307285400447654,
"count": 64531,
"self": 0.0,
"children": {
"worker_root": {
"total": 2129.417411901921,
"count": 64531,
"is_parallel": true,
"self": 901.1585452369432,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001725695999994059,
"count": 1,
"is_parallel": true,
"self": 0.0006433660009861342,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001082329999007925,
"count": 8,
"is_parallel": true,
"self": 0.001082329999007925
}
}
},
"UnityEnvironment.step": {
"total": 0.04415739600017332,
"count": 1,
"is_parallel": true,
"self": 0.0004756240009555768,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042504999964876333,
"count": 1,
"is_parallel": true,
"self": 0.00042504999964876333
},
"communicator.exchange": {
"total": 0.04169974699971135,
"count": 1,
"is_parallel": true,
"self": 0.04169974699971135
},
"steps_from_proto": {
"total": 0.0015569749998576299,
"count": 1,
"is_parallel": true,
"self": 0.00042672600011428585,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001130248999743344,
"count": 8,
"is_parallel": true,
"self": 0.001130248999743344
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1228.2588666649776,
"count": 64530,
"is_parallel": true,
"self": 26.56036614386676,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.27402726313403,
"count": 64530,
"is_parallel": true,
"self": 21.27402726313403
},
"communicator.exchange": {
"total": 1093.3448889038928,
"count": 64530,
"is_parallel": true,
"self": 1093.3448889038928
},
"steps_from_proto": {
"total": 87.07958435408409,
"count": 64530,
"is_parallel": true,
"self": 20.499208944890142,
"children": {
"_process_rank_one_or_two_observation": {
"total": 66.58037540919395,
"count": 516240,
"is_parallel": true,
"self": 66.58037540919395
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 701.452903627136,
"count": 64531,
"self": 2.3567661701536053,
"children": {
"process_trajectory": {
"total": 140.65703218398585,
"count": 64531,
"self": 140.47350847598636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18352370799948403,
"count": 2,
"self": 0.18352370799948403
}
}
},
"_update_policy": {
"total": 558.4391052729966,
"count": 473,
"self": 292.17689421198565,
"children": {
"TorchPPOOptimizer.update": {
"total": 266.26221106101093,
"count": 9465,
"self": 266.26221106101093
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.080004019779153e-07,
"count": 1,
"self": 9.080004019779153e-07
},
"TrainerController._save_models": {
"total": 0.08660812200014334,
"count": 1,
"self": 0.0017107240000768797,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08489739800006646,
"count": 1,
"self": 0.08489739800006646
}
}
}
}
}
}
}