poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7120624780654907,
"min": 1.6752153635025024,
"max": 3.295743703842163,
"count": 800
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34022.10546875,
"min": 23056.83984375,
"max": 132146.140625,
"count": 800
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 71.07142857142857,
"min": 47.14563106796116,
"max": 999.0,
"count": 800
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19900.0,
"min": 11700.0,
"max": 29732.0,
"count": 800
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1636.939504410227,
"min": 1187.7780098675032,
"max": 1639.39625700252,
"count": 704
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 229171.5306174318,
"min": 2376.8265762762753,
"max": 319879.45122049504,
"count": 704
},
"SoccerTwos.Step.mean": {
"value": 7999976.0,
"min": 9887.0,
"max": 7999976.0,
"count": 800
},
"SoccerTwos.Step.sum": {
"value": 7999976.0,
"min": 9887.0,
"max": 7999976.0,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.026560766622424126,
"min": -0.15737122297286987,
"max": 0.18703752756118774,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.691946506500244,
"min": -19.671401977539062,
"max": 23.208147048950195,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02333911508321762,
"min": -0.16499941051006317,
"max": 0.1860237866640091,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.2441370487213135,
"min": -20.62492561340332,
"max": 23.47559928894043,
"count": 800
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 800
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.08235971190088945,
"min": -0.6275818212465807,
"max": 0.5646181834208501,
"count": 800
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -11.447999954223633,
"min": -57.163999915122986,
"max": 46.37959957122803,
"count": 800
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.08235971190088945,
"min": -0.6275818212465807,
"max": 0.5646181834208501,
"count": 800
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -11.447999954223633,
"min": -57.163999915122986,
"max": 46.37959957122803,
"count": 800
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 800
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 800
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.032967403337048985,
"min": 0.02366156485691641,
"max": 0.047298097182647325,
"count": 799
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.032967403337048985,
"min": 0.02366156485691641,
"max": 0.08833755592220566,
"count": 799
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.044130529639207654,
"min": 7.133955895157998e-09,
"max": 0.06697994998345773,
"count": 799
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.044130529639207654,
"min": 7.133955895157998e-09,
"max": 0.11953396774414513,
"count": 799
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.04712000551323096,
"min": 9.061594309479738e-09,
"max": 0.07186874074654447,
"count": 799
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.04712000551323096,
"min": 9.061594309479738e-09,
"max": 0.13117384041349092,
"count": 799
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003000000000000001,
"count": 799
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0006,
"count": 799
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000004,
"min": 0.19999999999999993,
"max": 0.20000000000000004,
"count": 799
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000004,
"min": 0.19999999999999993,
"max": 0.4000000000000001,
"count": 799
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 799
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.010000000000000002,
"count": 799
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1732570473",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\V:\\ANACONDA\\envs\\MARL\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1732592411"
},
"total": 21937.200009100008,
"count": 1,
"self": 0.29581590000452707,
"children": {
"run_training.setup": {
"total": 0.08974570000282256,
"count": 1,
"self": 0.08974570000282256
},
"TrainerController.start_learning": {
"total": 21936.8144475,
"count": 1,
"self": 12.929385196781368,
"children": {
"TrainerController._reset_env": {
"total": 8.304657699976815,
"count": 40,
"self": 8.304657699976815
},
"TrainerController.advance": {
"total": 21915.47576830325,
"count": 539612,
"self": 12.730081307832734,
"children": {
"env_step": {
"total": 9690.569411797682,
"count": 539612,
"self": 7637.877279600281,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2044.9682655006618,
"count": 539613,
"self": 71.9575318003408,
"children": {
"TorchPolicy.evaluate": {
"total": 1973.010733700321,
"count": 1011376,
"self": 1973.010733700321
}
}
},
"workers": {
"total": 7.723866696738696,
"count": 539612,
"self": 0.0,
"children": {
"worker_root": {
"total": 21908.103088694617,
"count": 539612,
"is_parallel": true,
"self": 15825.960818394851,
"children": {
"steps_from_proto": {
"total": 0.06807190001563868,
"count": 82,
"is_parallel": true,
"self": 0.014305900032923091,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.053765999982715584,
"count": 328,
"is_parallel": true,
"self": 0.053765999982715584
}
}
},
"UnityEnvironment.step": {
"total": 6082.0741983997505,
"count": 539612,
"is_parallel": true,
"self": 257.43906669433636,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 234.81601379493804,
"count": 539612,
"is_parallel": true,
"self": 234.81601379493804
},
"communicator.exchange": {
"total": 4746.347336301478,
"count": 539612,
"is_parallel": true,
"self": 4746.347336301478
},
"steps_from_proto": {
"total": 843.4717816089978,
"count": 1079224,
"is_parallel": true,
"self": 180.56753821792518,
"children": {
"_process_rank_one_or_two_observation": {
"total": 662.9042433910727,
"count": 4316896,
"is_parallel": true,
"self": 662.9042433910727
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 12212.176275197737,
"count": 539612,
"self": 81.58882319513214,
"children": {
"process_trajectory": {
"total": 1681.1291229024064,
"count": 539612,
"self": 1679.1809420024001,
"children": {
"RLTrainer._checkpoint": {
"total": 1.948180900006264,
"count": 16,
"self": 1.948180900006264
}
}
},
"_update_policy": {
"total": 10449.458329100198,
"count": 1225,
"self": 1115.510473299175,
"children": {
"TorchPOCAOptimizer.update": {
"total": 9333.947855801023,
"count": 45765,
"self": 9333.947855801023
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4999968698248267e-06,
"count": 1,
"self": 1.4999968698248267e-06
},
"TrainerController._save_models": {
"total": 0.1046347999945283,
"count": 1,
"self": 0.0016571999876759946,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1029776000068523,
"count": 1,
"self": 0.1029776000068523
}
}
}
}
}
}
}
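
A minimal sketch of how this run log can be inspected, assuming the JSON above is saved locally as "timers.json" (the file name and path are assumptions for illustration, not part of the original upload). The "gauges" block holds per-metric summaries (last value, min, max, sample count) and the rest of the document is a hierarchical timer tree of {"total", "count", "self", "children"} nodes; the sketch prints both.

# Sketch: read an ML-Agents timers.json and summarize gauges and timers.
# Assumes the log shown above is saved as "timers.json" in the working directory.
import json

with open("timers.json") as f:  # assumed local copy of this file
    log = json.load(f)

# Gauges: per-metric summaries (final value, min, max, sample count).
for name, stats in log["gauges"].items():
    print(f"{name}: value={stats['value']:.4f} "
          f"(min={stats['min']:.4f}, max={stats['max']:.4f}, n={stats['count']})")

# Timers: a tree of {"total", "count", "self", "children"} nodes rooted at the
# top-level object. Walk it and report where wall-clock time was spent.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(log)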