{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9206799268722534,
"min": 0.9123677015304565,
"max": 2.8675155639648438,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8800.779296875,
"min": 8800.779296875,
"max": 29366.2265625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.81261920928955,
"min": 0.5126653909683228,
"max": 12.81261920928955,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2498.460693359375,
"min": 99.45708465576172,
"max": 2594.7666015625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06614251207564867,
"min": 0.0645576686239984,
"max": 0.07646987594974537,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2645700483025947,
"min": 0.2594024864734178,
"max": 0.36738553077009994,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19700146795195694,
"min": 0.11111410530101436,
"max": 0.2758441340689566,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7880058718078278,
"min": 0.44445642120405743,
"max": 1.379220670344783,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.75,
"min": 3.272727272727273,
"max": 25.75,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1133.0,
"min": 144.0,
"max": 1393.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.75,
"min": 3.272727272727273,
"max": 25.75,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1133.0,
"min": 144.0,
"max": 1393.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709006751",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709007218"
},
"total": 466.29137721699993,
"count": 1,
"self": 0.43260808499996983,
"children": {
"run_training.setup": {
"total": 0.05405732599996327,
"count": 1,
"self": 0.05405732599996327
},
"TrainerController.start_learning": {
"total": 465.804711806,
"count": 1,
"self": 0.6706142669938799,
"children": {
"TrainerController._reset_env": {
"total": 3.913575036999987,
"count": 1,
"self": 3.913575036999987
},
"TrainerController.advance": {
"total": 461.12336688400615,
"count": 18198,
"self": 0.32076414599180225,
"children": {
"env_step": {
"total": 460.80260273801434,
"count": 18198,
"self": 301.7543361750268,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.71597416498662,
"count": 18198,
"self": 1.616765295985715,
"children": {
"TorchPolicy.evaluate": {
"total": 157.0992088690009,
"count": 18198,
"self": 157.0992088690009
}
}
},
"workers": {
"total": 0.33229239800090227,
"count": 18198,
"self": 0.0,
"children": {
"worker_root": {
"total": 464.4867120749974,
"count": 18198,
"is_parallel": true,
"self": 228.66628955300257,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005567324000026019,
"count": 1,
"is_parallel": true,
"self": 0.0038812579999785157,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016860660000475036,
"count": 10,
"is_parallel": true,
"self": 0.0016860660000475036
}
}
},
"UnityEnvironment.step": {
"total": 0.057698859000026914,
"count": 1,
"is_parallel": true,
"self": 0.0007106190000172319,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040852400002222566,
"count": 1,
"is_parallel": true,
"self": 0.00040852400002222566
},
"communicator.exchange": {
"total": 0.05354716900001222,
"count": 1,
"is_parallel": true,
"self": 0.05354716900001222
},
"steps_from_proto": {
"total": 0.0030325469999752386,
"count": 1,
"is_parallel": true,
"self": 0.0003895390000252519,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026430079999499867,
"count": 10,
"is_parallel": true,
"self": 0.0026430079999499867
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 235.82042252199483,
"count": 18197,
"is_parallel": true,
"self": 11.245649515983928,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.570595125001489,
"count": 18197,
"is_parallel": true,
"self": 5.570595125001489
},
"communicator.exchange": {
"total": 184.4325632100053,
"count": 18197,
"is_parallel": true,
"self": 184.4325632100053
},
"steps_from_proto": {
"total": 34.57161467100411,
"count": 18197,
"is_parallel": true,
"self": 6.674595932997477,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.897018738006636,
"count": 181970,
"is_parallel": true,
"self": 27.897018738006636
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00018929899999875488,
"count": 1,
"self": 0.00018929899999875488,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 455.7214633540278,
"count": 688560,
"is_parallel": true,
"self": 14.19929638198721,
"children": {
"process_trajectory": {
"total": 251.0138054120403,
"count": 688560,
"is_parallel": true,
"self": 250.63681556104024,
"children": {
"RLTrainer._checkpoint": {
"total": 0.37698985100007576,
"count": 3,
"is_parallel": true,
"self": 0.37698985100007576
}
}
},
"_update_policy": {
"total": 190.50836156000025,
"count": 90,
"is_parallel": true,
"self": 54.824734230006584,
"children": {
"TorchPPOOptimizer.update": {
"total": 135.68362732999367,
"count": 4584,
"is_parallel": true,
"self": 135.68362732999367
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.0969663189999892,
"count": 1,
"self": 0.001052568999966752,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09591375000002245,
"count": 1,
"self": 0.09591375000002245
}
}
}
}
}
}
}
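
The JSON above is the `run_logs/timers.json` summary that `mlagents-learn` writes at the end of a run: the `gauges` block holds per-metric summaries (latest value, min, max, and the number of summary periods), and the nested `children` tree is a wall-clock breakdown of where training time went. Below is a minimal sketch of how one might inspect such a file with the Python standard library; the file path is an assumption and should point at wherever this log is stored.

```python
import json

# Assumed location of the log shown above; adjust as needed.
with open("run_logs/timers.json") as f:
    stats = json.load(f)

# Each gauge records the latest value plus min/max/count over the summary periods.
for name, gauge in stats["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, n={gauge['count']})")

# The timer tree nests child timers under "children"; walk it to see the
# wall-clock cost of each stage (env stepping, policy updates, checkpoints, ...).
def walk(node, label="root", depth=0):
    print(f"{'  ' * depth}{label}: {node.get('total', 0.0):.2f}s "
          f"({node.get('count', 0)} calls)")
    for child_label, child in node.get("children", {}).items():
        walk(child, child_label, depth + 1)

walk(stats)
```

Running this against the log above would, for example, surface `SnowballTarget.Environment.CumulativeReward.mean` rising from about 3.27 to 25.75 over the 20 summary periods, and show that most of the ~466 s of wall-clock time was spent in environment stepping and `TorchPPOOptimizer.update`.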