{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.958897590637207,
"min": 1.958897590637207,
"max": 3.295691728591919,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 40180.90625,
"min": 27310.2109375,
"max": 114069.5703125,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 58.75,
"min": 42.18103448275862,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19740.0,
"min": 13868.0,
"max": 26396.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1510.6785520503622,
"min": 1197.7163201944231,
"max": 1510.7273914316831,
"count": 488
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 253793.99674446083,
"min": 2399.754687971731,
"max": 344817.26437304646,
"count": 488
},
"SoccerTwos.Step.mean": {
"value": 4999823.0,
"min": 9216.0,
"max": 4999823.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999823.0,
"min": 9216.0,
"max": 4999823.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.06482566148042679,
"min": -0.1127309650182724,
"max": 0.13317570090293884,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 10.955536842346191,
"min": -18.26241683959961,
"max": 19.894054412841797,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.06249595806002617,
"min": -0.11246709525585175,
"max": 0.13988922536373138,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 10.561817169189453,
"min": -18.219669342041016,
"max": 19.86170196533203,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.016142012099542562,
"min": -0.75,
"max": 0.47272131579821225,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 2.728000044822693,
"min": -51.80399978160858,
"max": 54.885600090026855,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.016142012099542562,
"min": -0.75,
"max": 0.47272131579821225,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 2.728000044822693,
"min": -51.80399978160858,
"max": 54.885600090026855,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019074402245072026,
"min": 0.011203128710621967,
"max": 0.022758910306341324,
"count": 239
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019074402245072026,
"min": 0.011203128710621967,
"max": 0.022758910306341324,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0759902315835158,
"min": 0.00011842909461847739,
"max": 0.09545681774616241,
"count": 239
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0759902315835158,
"min": 0.00011842909461847739,
"max": 0.09545681774616241,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.07670819784204165,
"min": 0.0001206130289574503,
"max": 0.09665488079190254,
"count": 239
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.07670819784204165,
"min": 0.0001206130289574503,
"max": 0.09665488079190254,
"count": 239
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 239
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 239
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 239
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 239
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706892802",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\ksmsu\\miniconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1706904037"
},
"total": 11234.566408800078,
"count": 1,
"self": 1.690612900070846,
"children": {
"run_training.setup": {
"total": 0.09289009997155517,
"count": 1,
"self": 0.09289009997155517
},
"TrainerController.start_learning": {
"total": 11232.782905800035,
"count": 1,
"self": 8.93734723329544,
"children": {
"TrainerController._reset_env": {
"total": 4.484800899983384,
"count": 25,
"self": 4.484800899983384
},
"TrainerController.advance": {
"total": 11219.232637566747,
"count": 340045,
"self": 8.987933204043657,
"children": {
"env_step": {
"total": 5302.118825148093,
"count": 340045,
"self": 4140.895049570827,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1155.8955111680552,
"count": 340045,
"self": 44.82748165098019,
"children": {
"TorchPolicy.evaluate": {
"total": 1111.068029517075,
"count": 631986,
"self": 1111.068029517075
}
}
},
"workers": {
"total": 5.328264409210533,
"count": 340045,
"self": 0.0,
"children": {
"worker_root": {
"total": 11218.552969845128,
"count": 340045,
"is_parallel": true,
"self": 7970.805876631057,
"children": {
"steps_from_proto": {
"total": 0.03280089981853962,
"count": 50,
"is_parallel": true,
"self": 0.007205399568192661,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.02559550025034696,
"count": 200,
"is_parallel": true,
"self": 0.02559550025034696
}
}
},
"UnityEnvironment.step": {
"total": 3247.714292314253,
"count": 340045,
"is_parallel": true,
"self": 146.5276657892391,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 117.13562141440343,
"count": 340045,
"is_parallel": true,
"self": 117.13562141440343
},
"communicator.exchange": {
"total": 2531.7168078834657,
"count": 340045,
"is_parallel": true,
"self": 2531.7168078834657
},
"steps_from_proto": {
"total": 452.3341972271446,
"count": 680090,
"is_parallel": true,
"self": 97.30342673347332,
"children": {
"_process_rank_one_or_two_observation": {
"total": 355.0307704936713,
"count": 2720360,
"is_parallel": true,
"self": 355.0307704936713
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5908.125879214611,
"count": 340045,
"self": 67.7687984801596,
"children": {
"process_trajectory": {
"total": 1096.434175533941,
"count": 340045,
"self": 1095.0075900340453,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4265854998957366,
"count": 10,
"self": 1.4265854998957366
}
}
},
"_update_policy": {
"total": 4743.92290520051,
"count": 239,
"self": 687.9365829988383,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4055.986322201672,
"count": 7173,
"self": 4055.986322201672
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.00000761449337e-06,
"count": 1,
"self": 1.00000761449337e-06
},
"TrainerController._save_models": {
"total": 0.12811910000164062,
"count": 1,
"self": 0.015942999976687133,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11217610002495348,
"count": 1,
"self": 0.11217610002495348
}
}
}
}
}
}
}