{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.4985675513744354,
"min": 0.48494648933410645,
"max": 2.8707730770111084,
"count": 100
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4743.8701171875,
"min": 4734.298828125,
"max": 29525.900390625,
"count": 100
},
"SnowballTarget.Step.mean": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Step.sum": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.144131660461426,
"min": 0.26199665665626526,
"max": 14.316263198852539,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2758.105712890625,
"min": 50.82735061645508,
"max": 2934.833984375,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0637770346374429,
"min": 0.05963628309394813,
"max": 0.08114146205366078,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2551081385497716,
"min": 0.23854513237579253,
"max": 0.4057073102683039,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.14795954876086292,
"min": 0.11607741353252685,
"max": 0.26481452268712663,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5918381950434517,
"min": 0.4643096541301074,
"max": 1.324072613435633,
"count": 100
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.3764995412000027e-06,
"min": 1.3764995412000027e-06,
"max": 0.0002983764005411999,
"count": 100
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.505998164800011e-06,
"min": 5.505998164800011e-06,
"max": 0.001477032007656,
"count": 100
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10045880000000001,
"min": 0.10045880000000001,
"max": 0.1994588,
"count": 100
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40183520000000006,
"min": 0.40183520000000006,
"max": 0.992344,
"count": 100
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.289412000000004e-05,
"min": 3.289412000000004e-05,
"max": 0.00497299412,
"count": 100
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00013157648000000017,
"min": 0.00013157648000000017,
"max": 0.0246179656,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.933333333333334,
"min": 3.0,
"max": 28.377777777777776,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1257.0,
"min": 132.0,
"max": 1539.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.933333333333334,
"min": 3.0,
"max": 28.377777777777776,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1257.0,
"min": 132.0,
"max": 1539.0,
"count": 100
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680174634",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680176873"
},
"total": 2239.526590824,
"count": 1,
"self": 0.4307974049997938,
"children": {
"run_training.setup": {
"total": 0.11080872399998043,
"count": 1,
"self": 0.11080872399998043
},
"TrainerController.start_learning": {
"total": 2238.984984695,
"count": 1,
"self": 2.5971139529315224,
"children": {
"TrainerController._reset_env": {
"total": 9.395622794000019,
"count": 1,
"self": 9.395622794000019
},
"TrainerController.advance": {
"total": 2226.8639339670685,
"count": 90937,
"self": 1.2736360480407711,
"children": {
"env_step": {
"total": 2225.5902979190278,
"count": 90937,
"self": 1596.4252580241043,
"children": {
"SubprocessEnvManager._take_step": {
"total": 627.8563789190018,
"count": 90937,
"self": 11.318954485023596,
"children": {
"TorchPolicy.evaluate": {
"total": 616.5374244339782,
"count": 90937,
"self": 616.5374244339782
}
}
},
"workers": {
"total": 1.3086609759216117,
"count": 90937,
"self": 0.0,
"children": {
"worker_root": {
"total": 2232.3122450249302,
"count": 90937,
"is_parallel": true,
"self": 1067.8020832078946,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005535050999924351,
"count": 1,
"is_parallel": true,
"self": 0.0035557410001274548,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019793099997968966,
"count": 10,
"is_parallel": true,
"self": 0.0019793099997968966
}
}
},
"UnityEnvironment.step": {
"total": 0.04609433100017668,
"count": 1,
"is_parallel": true,
"self": 0.00045271300041349605,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00029810299997734546,
"count": 1,
"is_parallel": true,
"self": 0.00029810299997734546
},
"communicator.exchange": {
"total": 0.043678958999862516,
"count": 1,
"is_parallel": true,
"self": 0.043678958999862516
},
"steps_from_proto": {
"total": 0.0016645559999233228,
"count": 1,
"is_parallel": true,
"self": 0.0003627529997629608,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001301803000160362,
"count": 10,
"is_parallel": true,
"self": 0.001301803000160362
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1164.5101618170356,
"count": 90936,
"is_parallel": true,
"self": 46.12027794189112,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.373760011039167,
"count": 90936,
"is_parallel": true,
"self": 25.373760011039167
},
"communicator.exchange": {
"total": 944.7992827400683,
"count": 90936,
"is_parallel": true,
"self": 944.7992827400683
},
"steps_from_proto": {
"total": 148.21684112403705,
"count": 90936,
"is_parallel": true,
"self": 29.030939516187573,
"children": {
"_process_rank_one_or_two_observation": {
"total": 119.18590160784947,
"count": 909360,
"is_parallel": true,
"self": 119.18590160784947
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00024894199987102184,
"count": 1,
"self": 0.00024894199987102184,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 2211.5093824378973,
"count": 1942421,
"is_parallel": true,
"self": 45.81380997078304,
"children": {
"process_trajectory": {
"total": 1234.8657162741101,
"count": 1942421,
"is_parallel": true,
"self": 1183.0454632841115,
"children": {
"RLTrainer._checkpoint": {
"total": 51.82025298999861,
"count": 200,
"is_parallel": true,
"self": 51.82025298999861
}
}
},
"_update_policy": {
"total": 930.8298561930039,
"count": 454,
"is_parallel": true,
"self": 341.6417161570182,
"children": {
"TorchPPOOptimizer.update": {
"total": 589.1881400359857,
"count": 23136,
"is_parallel": true,
"self": 589.1881400359857
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12806503899992094,
"count": 1,
"self": 0.0008491259995935252,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12721591300032742,
"count": 1,
"self": 0.12721591300032742
}
}
}
}
}
}
}