{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.3606890439987183,
"min": 1.3207616806030273,
"max": 3.2958154678344727,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27997.537109375,
"min": 7348.11328125,
"max": 167852.40625,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 64.92207792207792,
"min": 41.31858407079646,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19996.0,
"min": 15984.0,
"max": 26616.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1944.2369976366379,
"min": 1186.2772326853458,
"max": 1956.3347176419152,
"count": 4794
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 299412.49763604224,
"min": 2372.5625412596405,
"max": 425002.35477958986,
"count": 4794
},
"SoccerTwos.Step.mean": {
"value": 49999862.0,
"min": 9326.0,
"max": 49999862.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999862.0,
"min": 9326.0,
"max": 49999862.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.003132409183308482,
"min": -0.10575120896100998,
"max": 0.3213760256767273,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.47925859689712524,
"min": -20.159502029418945,
"max": 44.83538055419922,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0028535863384604454,
"min": -0.10418880730867386,
"max": 0.32389914989471436,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.4365987181663513,
"min": -21.083391189575195,
"max": 44.63798141479492,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.10041568676630656,
"min": -0.5714285714285714,
"max": 0.8144077510796777,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 15.363600075244904,
"min": -62.99719989299774,
"max": 105.05859988927841,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.10041568676630656,
"min": -0.5714285714285714,
"max": 0.8144077510796777,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 15.363600075244904,
"min": -62.99719989299774,
"max": 105.05859988927841,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.013992966905546685,
"min": 0.009055629027837615,
"max": 0.026194270861257488,
"count": 2408
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.013992966905546685,
"min": 0.009055629027837615,
"max": 0.026194270861257488,
"count": 2408
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.08764854321877162,
"min": 1.2683622570008878e-07,
"max": 0.10234996452927589,
"count": 2408
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.08764854321877162,
"min": 1.2683622570008878e-07,
"max": 0.10234996452927589,
"count": 2408
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08808425143361091,
"min": 1.2518245879888922e-07,
"max": 0.10266090234120687,
"count": 2408
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08808425143361091,
"min": 1.2518245879888922e-07,
"max": 0.10266090234120687,
"count": 2408
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 4.656996902000402e-08,
"min": 4.656996902000402e-08,
"max": 0.000149936022042652,
"count": 2408
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 4.656996902000402e-08,
"min": 4.656996902000402e-08,
"max": 0.000149936022042652,
"count": 2408
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.10005266599999998,
"min": 0.10005266599999998,
"max": 0.26992749159999996,
"count": 2408
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.10005266599999998,
"min": 0.10005266599999998,
"max": 0.26992749159999996,
"count": 2408
},
"SoccerTwos.Policy.Beta.mean": {
"value": 1.3094902000000267e-05,
"min": 1.3094902000000267e-05,
"max": 0.0099957390652,
"count": 2408
},
"SoccerTwos.Policy.Beta.sum": {
"value": 1.3094902000000267e-05,
"min": 1.3094902000000267e-05,
"max": 0.0099957390652,
"count": 2408
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717231552",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/prithvi/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0",
"numpy_version": "1.23.5",
"end_time_seconds": "1717459891"
},
"total": 228338.0476951669,
"count": 1,
"self": 0.5725247919326648,
"children": {
"run_training.setup": {
"total": 0.024256707984022796,
"count": 1,
"self": 0.024256707984022796
},
"TrainerController.start_learning": {
"total": 228337.45091366698,
"count": 1,
"self": 42.539694306091405,
"children": {
"TrainerController._reset_env": {
"total": 19.4115347933257,
"count": 250,
"self": 19.4115347933257
},
"TrainerController.advance": {
"total": 228275.35854177666,
"count": 3423208,
"self": 39.36356943077408,
"children": {
"env_step": {
"total": 180749.49001260777,
"count": 3423208,
"self": 174422.59828599403,
"children": {
"SubprocessEnvManager._take_step": {
"total": 6301.572822949383,
"count": 3423208,
"self": 194.92717085045297,
"children": {
"TorchPolicy.evaluate": {
"total": 6106.64565209893,
"count": 6310568,
"self": 6106.64565209893
}
}
},
"workers": {
"total": 25.318903664359823,
"count": 3423208,
"self": 0.0,
"children": {
"worker_root": {
"total": 228269.80472033995,
"count": 3423208,
"is_parallel": true,
"self": 58624.286830272176,
"children": {
"steps_from_proto": {
"total": 0.5045759890927002,
"count": 500,
"is_parallel": true,
"self": 0.05740836798213422,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.44716762111056596,
"count": 2000,
"is_parallel": true,
"self": 0.44716762111056596
}
}
},
"UnityEnvironment.step": {
"total": 169645.01331407868,
"count": 3423208,
"is_parallel": true,
"self": 456.1211591894971,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3106.9896800647257,
"count": 3423208,
"is_parallel": true,
"self": 3106.9896800647257
},
"communicator.exchange": {
"total": 159872.00061112107,
"count": 3423208,
"is_parallel": true,
"self": 159872.00061112107
},
"steps_from_proto": {
"total": 6209.901863703388,
"count": 6846416,
"is_parallel": true,
"self": 679.2257801635424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 5530.676083539845,
"count": 27385664,
"is_parallel": true,
"self": 5530.676083539845
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 47486.504959738115,
"count": 3423208,
"self": 353.96039831102826,
"children": {
"process_trajectory": {
"total": 7820.259917136049,
"count": 3423208,
"self": 7791.687315305346,
"children": {
"RLTrainer._checkpoint": {
"total": 28.572601830703206,
"count": 100,
"self": 28.572601830703206
}
}
},
"_update_policy": {
"total": 39312.28464429104,
"count": 2408,
"self": 3858.2221490669763,
"children": {
"TorchPOCAOptimizer.update": {
"total": 35454.06249522406,
"count": 72240,
"self": 35454.06249522406
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.5797787606716156e-07,
"count": 1,
"self": 4.5797787606716156e-07
},
"TrainerController._save_models": {
"total": 0.14114233292639256,
"count": 1,
"self": 0.0030910419300198555,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1380512909963727,
"count": 1,
"self": 0.1380512909963727
}
}
}
}
}
}
}