{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0367941856384277,
"min": 1.0367941856384277,
"max": 1.0655056238174438,
"count": 5
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10435.333984375,
"min": 10311.0283203125,
"max": 10915.4716796875,
"count": 5
},
"SnowballTarget.Step.mean": {
"value": 349976.0,
"min": 309992.0,
"max": 349976.0,
"count": 5
},
"SnowballTarget.Step.sum": {
"value": 349976.0,
"min": 309992.0,
"max": 349976.0,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.040831565856934,
"min": 13.79809856414795,
"max": 14.040831565856934,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 4844.0869140625,
"min": 4691.353515625,
"max": 4931.90771484375,
"count": 5
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 5
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 5
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0658794366727485,
"min": 0.06211032461826963,
"max": 0.0706150818019238,
"count": 5
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.263517746690994,
"min": 0.263517746690994,
"max": 0.3472825798740172,
"count": 5
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15005831745471437,
"min": 0.15005831745471437,
"max": 0.17657990674762164,
"count": 5
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6002332698188575,
"min": 0.6002332698188575,
"max": 0.8828995337381083,
"count": 5
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.885812908571415e-06,
"min": 6.885812908571415e-06,
"max": 6.345723016571427e-05,
"count": 5
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.754325163428566e-05,
"min": 2.754325163428566e-05,
"max": 0.0002538289206628571,
"count": 5
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10137714285714286,
"min": 0.10137714285714286,
"max": 0.11269142857142858,
"count": 5
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40550857142857144,
"min": 0.40550857142857144,
"max": 0.5493142857142858,
"count": 5
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.871942857142844e-05,
"min": 7.871942857142844e-05,
"max": 0.0006433022857142858,
"count": 5
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003148777142857138,
"min": 0.0003148777142857138,
"max": 0.002573209142857143,
"count": 5
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.15909090909091,
"min": 26.59090909090909,
"max": 28.15909090909091,
"count": 5
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1239.0,
"min": 1170.0,
"max": 1514.0,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.15909090909091,
"min": 26.59090909090909,
"max": 28.15909090909091,
"count": 5
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1239.0,
"min": 1170.0,
"max": 1514.0,
"count": 5
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731683702",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget3 --resume --torch-device cuda:0 --no-graphics --debug",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1731683829"
},
"total": 127.34562296200056,
"count": 1,
"self": 0.42530555900066247,
"children": {
"run_training.setup": {
"total": 0.05668498900013219,
"count": 1,
"self": 0.05668498900013219
},
"TrainerController.start_learning": {
"total": 126.86363241399977,
"count": 1,
"self": 0.2151459170263479,
"children": {
"TrainerController._reset_env": {
"total": 2.091820235999876,
"count": 1,
"self": 2.091820235999876
},
"TrainerController.advance": {
"total": 124.43796483097412,
"count": 4565,
"self": 0.06627403594575298,
"children": {
"env_step": {
"total": 124.37169079502837,
"count": 4565,
"self": 77.4977750210328,
"children": {
"SubprocessEnvManager._take_step": {
"total": 46.80662975798077,
"count": 4565,
"self": 0.3603283040047245,
"children": {
"TorchPolicy.evaluate": {
"total": 46.44630145397605,
"count": 4565,
"self": 46.44630145397605
}
}
},
"workers": {
"total": 0.06728601601480477,
"count": 4565,
"self": 0.0,
"children": {
"worker_root": {
"total": 126.45530187803524,
"count": 4565,
"is_parallel": true,
"self": 74.12169770105811,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023176779995992547,
"count": 1,
"is_parallel": true,
"self": 0.0007073850010783644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016102929985208903,
"count": 10,
"is_parallel": true,
"self": 0.0016102929985208903
}
}
},
"UnityEnvironment.step": {
"total": 0.03498332399976789,
"count": 1,
"is_parallel": true,
"self": 0.0006267919998208527,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005173250001462293,
"count": 1,
"is_parallel": true,
"self": 0.0005173250001462293
},
"communicator.exchange": {
"total": 0.03198084399991785,
"count": 1,
"is_parallel": true,
"self": 0.03198084399991785
},
"steps_from_proto": {
"total": 0.0018583629998829565,
"count": 1,
"is_parallel": true,
"self": 0.0003658820023701992,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014924809975127573,
"count": 10,
"is_parallel": true,
"self": 0.0014924809975127573
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 52.33360417697713,
"count": 4564,
"is_parallel": true,
"self": 2.4787895938789006,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.3280006540717295,
"count": 4564,
"is_parallel": true,
"self": 1.3280006540717295
},
"communicator.exchange": {
"total": 40.516957113009994,
"count": 4564,
"is_parallel": true,
"self": 40.516957113009994
},
"steps_from_proto": {
"total": 8.009856816016509,
"count": 4564,
"is_parallel": true,
"self": 1.501679792010691,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.508177024005818,
"count": 45640,
"is_parallel": true,
"self": 6.508177024005818
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012469299963413505,
"count": 1,
"self": 0.00012469299963413505,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 123.16921977485345,
"count": 160201,
"is_parallel": true,
"self": 3.418663629007824,
"children": {
"process_trajectory": {
"total": 86.42869974484529,
"count": 160201,
"is_parallel": true,
"self": 86.20989998884488,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2187997560004078,
"count": 1,
"is_parallel": true,
"self": 0.2187997560004078
}
}
},
"_update_policy": {
"total": 33.32185640100033,
"count": 22,
"is_parallel": true,
"self": 11.566497000006166,
"children": {
"TorchPPOOptimizer.update": {
"total": 21.755359400994166,
"count": 1119,
"is_parallel": true,
"self": 21.755359400994166
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11857673699978477,
"count": 1,
"self": 0.0011199229993508197,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11745681400043395,
"count": 1,
"self": 0.11745681400043395
}
}
}
}
}
}
}