{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4067021608352661,
"min": 1.4066948890686035,
"max": 1.4271438121795654,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69340.5703125,
"min": 69045.6484375,
"max": 76882.78125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.16045380875202,
"min": 77.28188976377953,
"max": 425.34453781512605,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49459.0,
"min": 48843.0,
"max": 50616.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999981.0,
"min": 49986.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999981.0,
"min": 49986.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.477628231048584,
"min": -0.005821355152875185,
"max": 2.5121116638183594,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1528.6966552734375,
"min": -0.6869199275970459,
"max": 1570.2490234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8633245134083145,
"min": 1.818719674722623,
"max": 3.9865133119007896,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2383.67122477293,
"min": 214.60892161726952,
"max": 2465.589051425457,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8633245134083145,
"min": 1.818719674722623,
"max": 3.9865133119007896,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2383.67122477293,
"min": 214.60892161726952,
"max": 2465.589051425457,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016287201620838864,
"min": 0.012344713716273267,
"max": 0.01993649478536099,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.048861604862516596,
"min": 0.024689427432546533,
"max": 0.05481511887434559,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06370373260643747,
"min": 0.023635759359846513,
"max": 0.06477069134513537,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1911111978193124,
"min": 0.047271518719693026,
"max": 0.1911111978193124,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4271488576499985e-06,
"min": 3.4271488576499985e-06,
"max": 0.000295299001567,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0281446572949996e-05,
"min": 1.0281446572949996e-05,
"max": 0.0008440812186395997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114235,
"min": 0.10114235,
"max": 0.19843299999999994,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30342705000000003,
"min": 0.20740895,
"max": 0.5813604,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.700326499999998e-05,
"min": 6.700326499999998e-05,
"max": 0.0049218067,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020100979499999995,
"min": 0.00020100979499999995,
"max": 0.014069883959999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679085697",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679088193"
},
"total": 2495.374125943,
"count": 1,
"self": 0.4383068319998529,
"children": {
"run_training.setup": {
"total": 0.2545354389999943,
"count": 1,
"self": 0.2545354389999943
},
"TrainerController.start_learning": {
"total": 2494.681283672,
"count": 1,
"self": 4.39778800393151,
"children": {
"TrainerController._reset_env": {
"total": 9.10333916799999,
"count": 1,
"self": 9.10333916799999
},
"TrainerController.advance": {
"total": 2481.0646185620685,
"count": 232787,
"self": 4.9735864891417805,
"children": {
"env_step": {
"total": 1932.1601969079784,
"count": 232787,
"self": 1627.827597920892,
"children": {
"SubprocessEnvManager._take_step": {
"total": 301.3424954910392,
"count": 232787,
"self": 17.25782846010145,
"children": {
"TorchPolicy.evaluate": {
"total": 284.08466703093774,
"count": 222898,
"self": 284.08466703093774
}
}
},
"workers": {
"total": 2.990103496047084,
"count": 232787,
"self": 0.0,
"children": {
"worker_root": {
"total": 2486.1962628469873,
"count": 232787,
"is_parallel": true,
"self": 1163.6048296780536,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0013165700000001834,
"count": 1,
"is_parallel": true,
"self": 0.00041744500003915164,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008991249999610318,
"count": 2,
"is_parallel": true,
"self": 0.0008991249999610318
}
}
},
"UnityEnvironment.step": {
"total": 0.030116823000014392,
"count": 1,
"is_parallel": true,
"self": 0.0003436500000475462,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021049999998012936,
"count": 1,
"is_parallel": true,
"self": 0.00021049999998012936
},
"communicator.exchange": {
"total": 0.028837164999998777,
"count": 1,
"is_parallel": true,
"self": 0.028837164999998777
},
"steps_from_proto": {
"total": 0.0007255079999879399,
"count": 1,
"is_parallel": true,
"self": 0.00023722200000975135,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004882859999781886,
"count": 2,
"is_parallel": true,
"self": 0.0004882859999781886
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1322.5914331689337,
"count": 232786,
"is_parallel": true,
"self": 39.26015246894349,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.73475885494952,
"count": 232786,
"is_parallel": true,
"self": 83.73475885494952
},
"communicator.exchange": {
"total": 1105.767706193092,
"count": 232786,
"is_parallel": true,
"self": 1105.767706193092
},
"steps_from_proto": {
"total": 93.82881565194887,
"count": 232786,
"is_parallel": true,
"self": 37.49318895295923,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.33562669898964,
"count": 465572,
"is_parallel": true,
"self": 56.33562669898964
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 543.9308351649483,
"count": 232787,
"self": 6.645098412012317,
"children": {
"process_trajectory": {
"total": 155.10337186593497,
"count": 232787,
"self": 153.65896604093447,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4444058250005014,
"count": 10,
"self": 1.4444058250005014
}
}
},
"_update_policy": {
"total": 382.18236488700103,
"count": 97,
"self": 322.87850298000626,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.30386190699477,
"count": 2910,
"self": 59.30386190699477
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0580001799098682e-06,
"count": 1,
"self": 1.0580001799098682e-06
},
"TrainerController._save_models": {
"total": 0.11553688000003604,
"count": 1,
"self": 0.002648135999606893,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11288874400042914,
"count": 1,
"self": 0.11288874400042914
}
}
}
}
}
}
}