{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.9415829181671143,
"min": 2.9415829181671143,
"max": 2.9415829181671143,
"count": 1
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 460487.15625,
"min": 460487.15625,
"max": 460487.15625,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 111.38297872340425,
"min": 111.38297872340425,
"max": 111.38297872340425,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20940.0,
"min": 20940.0,
"max": 20940.0,
"count": 1
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1203.3920947656047,
"min": 1203.3920947656047,
"max": 1203.3920947656047,
"count": 1
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 55356.03635921782,
"min": 55356.03635921782,
"max": 55356.03635921782,
"count": 1
},
"SoccerTwos.Step.mean": {
"value": 9836.0,
"min": 9836.0,
"max": 9836.0,
"count": 1
},
"SoccerTwos.Step.sum": {
"value": 9836.0,
"min": 9836.0,
"max": 9836.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.046986013650894165,
"min": -0.046986013650894165,
"max": -0.046986013650894165,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -4.322713375091553,
"min": -4.322713375091553,
"max": -4.322713375091553,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04885184392333031,
"min": -0.04885184392333031,
"max": -0.04885184392333031,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.4943695068359375,
"min": -4.4943695068359375,
"max": -4.4943695068359375,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.17788695564250584,
"min": -0.17788695564250584,
"max": -0.17788695564250584,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -16.365599919110537,
"min": -16.365599919110537,
"max": -16.365599919110537,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.17788695564250584,
"min": -0.17788695564250584,
"max": -0.17788695564250584,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -16.365599919110537,
"min": -16.365599919110537,
"max": -16.365599919110537,
"count": 1
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.2279435114503995,
"min": 0.2279435114503995,
"max": 0.2279435114503995,
"count": 1
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 5.4706442748095885,
"min": 5.4706442748095885,
"max": 5.4706442748095885,
"count": 1
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.005402487274283552,
"min": 0.005402487274283552,
"max": 0.005402487274283552,
"count": 1
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.12965969458280524,
"min": 0.12965969458280524,
"max": 0.12965969458280524,
"count": 1
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.01613315310505718,
"min": 0.01613315310505718,
"max": 0.01613315310505718,
"count": 1
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.38719567452137227,
"min": 0.38719567452137227,
"max": 0.38719567452137227,
"count": 1
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0072,
"min": 0.0072,
"max": 0.0072,
"count": 1
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.19999999999999996,
"min": 0.19999999999999996,
"max": 0.19999999999999996,
"count": 1
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 4.799999999999999,
"min": 4.799999999999999,
"max": 4.799999999999999,
"count": 1
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 1
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.12,
"min": 0.12,
"max": 0.12,
"count": 1
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1720800030",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\ADMIN\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn C:/Users/ADMIN/for_machine_learning/for_machine_learning/ml/Learnings/deep_learning_tool_kits/Deep_RL/ml-agents/config/poca/SoccerTwos.yaml --env=C:/Users/ADMIN/for_machine_learning/for_machine_learning/ml/Learnings/deep_learning_tool_kits/Deep_RL/ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1720800426"
},
"total": 395.8422837999824,
"count": 1,
"self": 0.5873121999902651,
"children": {
"run_training.setup": {
"total": 0.18558990000747144,
"count": 1,
"self": 0.18558990000747144
},
"TrainerController.start_learning": {
"total": 395.06938169998466,
"count": 1,
"self": 0.23730519885430112,
"children": {
"TrainerController._reset_env": {
"total": 11.153327699954389,
"count": 40,
"self": 11.153327699954389
},
"TrainerController.advance": {
"total": 383.334124301211,
"count": 4896,
"self": 0.2583260011742823,
"children": {
"env_step": {
"total": 188.8054977004067,
"count": 4896,
"self": 139.31214919898775,
"children": {
"SubprocessEnvManager._take_step": {
"total": 49.34584840020398,
"count": 4896,
"self": 1.6539560005476233,
"children": {
"TorchPolicy.evaluate": {
"total": 47.69189239965635,
"count": 9784,
"self": 47.69189239965635
}
}
},
"workers": {
"total": 0.14750010121497326,
"count": 4896,
"self": 0.0,
"children": {
"worker_root": {
"total": 386.6030013007403,
"count": 4896,
"is_parallel": true,
"self": 278.12211950161145,
"children": {
"steps_from_proto": {
"total": 0.16964630017173477,
"count": 78,
"is_parallel": true,
"self": 0.03418659997987561,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.13545970019185916,
"count": 312,
"is_parallel": true,
"self": 0.13545970019185916
}
}
},
"UnityEnvironment.step": {
"total": 108.3112354989571,
"count": 4896,
"is_parallel": true,
"self": 6.270320098468801,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.085429701022804,
"count": 4896,
"is_parallel": true,
"self": 6.085429701022804
},
"communicator.exchange": {
"total": 74.54166550075752,
"count": 4896,
"is_parallel": true,
"self": 74.54166550075752
},
"steps_from_proto": {
"total": 21.41382019870798,
"count": 9792,
"is_parallel": true,
"self": 4.229228097276064,
"children": {
"_process_rank_one_or_two_observation": {
"total": 17.184592101431917,
"count": 39168,
"is_parallel": true,
"self": 17.184592101431917
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 194.27030059963,
"count": 4896,
"self": 0.9905304000130855,
"children": {
"process_trajectory": {
"total": 7.081178599590203,
"count": 4896,
"self": 7.081178599590203
},
"_update_policy": {
"total": 186.1985916000267,
"count": 24,
"self": 5.064732000144431,
"children": {
"TorchPOCAOptimizer.update": {
"total": 181.13385959988227,
"count": 2427,
"self": 181.13385959988227
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.5999906938523054e-06,
"count": 1,
"self": 2.5999906938523054e-06
},
"TrainerController._save_models": {
"total": 0.3446218999742996,
"count": 1,
"self": 0.010635999991791323,
"children": {
"RLTrainer._checkpoint": {
"total": 0.33398589998250827,
"count": 1,
"self": 0.33398589998250827
}
}
}
}
}
}
}