{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.696609616279602,
"min": 0.696609616279602,
"max": 2.8352744579315186,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6620.57763671875,
"min": 6620.57763671875,
"max": 28942.48046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.802757263183594,
"min": 0.38978490233421326,
"max": 11.855842590332031,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2301.53759765625,
"min": 75.61827087402344,
"max": 2418.591796875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06956119947740351,
"min": 0.061706848162929435,
"max": 0.07523903495109385,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27824479790961404,
"min": 0.24682739265171774,
"max": 0.36489404524749547,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1788107183780156,
"min": 0.11141497405433078,
"max": 0.29012324412663776,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7152428735120624,
"min": 0.4456598962173231,
"max": 1.2203167703221827,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.181818181818183,
"min": 3.3181818181818183,
"max": 23.295454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1020.0,
"min": 146.0,
"max": 1277.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.181818181818183,
"min": 3.3181818181818183,
"max": 23.295454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1020.0,
"min": 146.0,
"max": 1277.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1735285872",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1735286248"
},
"total": 375.810574006,
"count": 1,
"self": 0.3213135149999289,
"children": {
"run_training.setup": {
"total": 0.058057357000052434,
"count": 1,
"self": 0.058057357000052434
},
"TrainerController.start_learning": {
"total": 375.43120313400004,
"count": 1,
"self": 0.32258889801687474,
"children": {
"TrainerController._reset_env": {
"total": 1.9687174969999433,
"count": 1,
"self": 1.9687174969999433
},
"TrainerController.advance": {
"total": 373.05532376598376,
"count": 18192,
"self": 0.32444219200442603,
"children": {
"env_step": {
"total": 258.80072875697215,
"count": 18192,
"self": 194.88347049396407,
"children": {
"SubprocessEnvManager._take_step": {
"total": 63.71721979099948,
"count": 18192,
"self": 1.1796143459782797,
"children": {
"TorchPolicy.evaluate": {
"total": 62.5376054450212,
"count": 18192,
"self": 62.5376054450212
}
}
},
"workers": {
"total": 0.20003847200860037,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 374.6667297500326,
"count": 18192,
"is_parallel": true,
"self": 202.46280011901626,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002289842000209319,
"count": 1,
"is_parallel": true,
"self": 0.0006804640006521367,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016093779995571822,
"count": 10,
"is_parallel": true,
"self": 0.0016093779995571822
}
}
},
"UnityEnvironment.step": {
"total": 0.02664588499965248,
"count": 1,
"is_parallel": true,
"self": 0.0004511140000431624,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00030347499978233827,
"count": 1,
"is_parallel": true,
"self": 0.00030347499978233827
},
"communicator.exchange": {
"total": 0.024591572000190354,
"count": 1,
"is_parallel": true,
"self": 0.024591572000190354
},
"steps_from_proto": {
"total": 0.001299723999636626,
"count": 1,
"is_parallel": true,
"self": 0.00027273399973637424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010269899999002519,
"count": 10,
"is_parallel": true,
"self": 0.0010269899999002519
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 172.2039296310163,
"count": 18191,
"is_parallel": true,
"self": 6.19685793796134,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.2970655469821395,
"count": 18191,
"is_parallel": true,
"self": 3.2970655469821395
},
"communicator.exchange": {
"total": 142.24728810504484,
"count": 18191,
"is_parallel": true,
"self": 142.24728810504484
},
"steps_from_proto": {
"total": 20.462718041027983,
"count": 18191,
"is_parallel": true,
"self": 4.013908121953591,
"children": {
"_process_rank_one_or_two_observation": {
"total": 16.448809919074392,
"count": 181910,
"is_parallel": true,
"self": 16.448809919074392
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 113.93015281700718,
"count": 18192,
"self": 0.39601577402254406,
"children": {
"process_trajectory": {
"total": 24.926398363982116,
"count": 18192,
"self": 24.552189101982094,
"children": {
"RLTrainer._checkpoint": {
"total": 0.37420926200002214,
"count": 4,
"self": 0.37420926200002214
}
}
},
"_update_policy": {
"total": 88.60773867900252,
"count": 90,
"self": 35.30114400799357,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.30659467100895,
"count": 4587,
"self": 53.30659467100895
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0069998097606003e-06,
"count": 1,
"self": 1.0069998097606003e-06
},
"TrainerController._save_models": {
"total": 0.08457196599965755,
"count": 1,
"self": 0.0009384009999848786,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08363356499967267,
"count": 1,
"self": 0.08363356499967267
}
}
}
}
}
}
}