{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.461629033088684,
"min": 1.4189385175704956,
"max": 1.4624531269073486,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8804.853515625,
"min": 7319.8828125,
"max": 10341.7890625,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 0.05555555555555555,
"min": 0.0,
"max": 394.8666666666667,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 1.0,
"min": 0.0,
"max": 5967.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.7222222222222222,
"min": 0.3888888888888889,
"max": 0.7333333333333333,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 13.0,
"min": 6.0,
"max": 13.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 335.0,
"min": 284.25,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 6030.0,
"min": 5004.0,
"max": 7101.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199702.0,
"min": 5600.0,
"max": 1199702.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199702.0,
"min": 5600.0,
"max": 1199702.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.12799793481826782,
"min": 0.029348647221922874,
"max": 1.0539227724075317,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 2.3039627075195312,
"min": 0.44022971391677856,
"max": 15.80884075164795,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8996142148971558,
"min": -0.19201916456222534,
"max": 2.7600245475769043,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 16.193056106567383,
"min": -3.0723066329956055,
"max": 55.20048904418945,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 4.716027929344111,
"min": -1.1326333165168763,
"max": 32.35911449293295,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 84.888502728194,
"min": -16.989499747753143,
"max": 582.4640608727932,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.4482337481652697,
"min": 0.0,
"max": 14.208261092503866,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 8.068207466974854,
"min": 0.0,
"max": 255.74869966506958,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 4.244424697839552,
"min": -1.0193702220916747,
"max": 29.12319684235586,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 76.39964456111193,
"min": -15.290553331375122,
"max": 524.2175431624055,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.02185002815288802,
"min": 0.01476107951020822,
"max": 0.03155452210921794,
"count": 140
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.02185002815288802,
"min": 0.01476107951020822,
"max": 0.03155452210921794,
"count": 140
},
"Agent.Losses.ValueLoss.mean": {
"value": 11.1973377764225,
"min": 0.0014995217449419822,
"max": 22.843181212743122,
"count": 140
},
"Agent.Losses.ValueLoss.sum": {
"value": 11.1973377764225,
"min": 0.0014995217449419822,
"max": 22.843181212743122,
"count": 140
},
"Agent.Policy.LearningRate.mean": {
"value": 5.745998085e-07,
"min": 5.745998085e-07,
"max": 0.0002979000007,
"count": 140
},
"Agent.Policy.LearningRate.sum": {
"value": 5.745998085e-07,
"min": 5.745998085e-07,
"max": 0.0002979000007,
"count": 140
},
"Agent.Policy.Epsilon.mean": {
"value": 0.1001915,
"min": 0.1001915,
"max": 0.1993,
"count": 140
},
"Agent.Policy.Epsilon.sum": {
"value": 0.1001915,
"min": 0.1001915,
"max": 0.1993,
"count": 140
},
"Agent.Policy.Beta.mean": {
"value": 1.9555850000000004e-05,
"min": 1.9555850000000004e-05,
"max": 0.00496507,
"count": 140
},
"Agent.Policy.Beta.sum": {
"value": 1.9555850000000004e-05,
"min": 1.9555850000000004e-05,
"max": 0.00496507,
"count": 140
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.013372027858470878,
"min": 0.00993607259200265,
"max": 0.5835270757476488,
"count": 140
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.013372027858470878,
"min": 0.00993607259200265,
"max": 0.5835270757476488,
"count": 140
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 2.372185468673706,
"min": 2.317383666833242,
"max": 3.3108297189076743,
"count": 140
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 2.372185468673706,
"min": 2.317383666833242,
"max": 3.3108297189076743,
"count": 140
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717759085",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_9_task_2_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_2_run_id_2_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1717762680"
},
"total": 3594.6521040000002,
"count": 1,
"self": 0.2845319000002746,
"children": {
"run_training.setup": {
"total": 0.05085230000000007,
"count": 1,
"self": 0.05085230000000007
},
"TrainerController.start_learning": {
"total": 3594.3167198,
"count": 1,
"self": 4.735316700025123,
"children": {
"TrainerController._reset_env": {
"total": 2.0784781000000003,
"count": 1,
"self": 2.0784781000000003
},
"TrainerController.advance": {
"total": 3587.337379899975,
"count": 400923,
"self": 4.43875570006503,
"children": {
"env_step": {
"total": 3582.89862419991,
"count": 400923,
"self": 1560.0457609998743,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2019.7551650000885,
"count": 400923,
"self": 10.148434200125394,
"children": {
"TorchPolicy.evaluate": {
"total": 2009.606730799963,
"count": 400035,
"self": 2009.606730799963
}
}
},
"workers": {
"total": 3.0976981999472724,
"count": 400923,
"self": 0.0,
"children": {
"worker_root": {
"total": 3587.713058600138,
"count": 400923,
"is_parallel": true,
"self": 2206.219591200131,
"children": {
"steps_from_proto": {
"total": 0.006369700000000034,
"count": 1,
"is_parallel": true,
"self": 0.00010019999999966167,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006224900000000089,
"count": 2,
"is_parallel": true,
"self": 2.750000000029118e-05,
"children": {
"_observation_to_np_array": {
"total": 0.0061973999999997975,
"count": 3,
"is_parallel": true,
"self": 2.8899999999776327e-05,
"children": {
"process_pixels": {
"total": 0.006168500000000021,
"count": 3,
"is_parallel": true,
"self": 0.0002241999999998967,
"children": {
"image_decompress": {
"total": 0.0059443000000001245,
"count": 3,
"is_parallel": true,
"self": 0.0059443000000001245
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.46000000002833e-05,
"count": 2,
"is_parallel": true,
"self": 4.46000000002833e-05
}
}
},
"UnityEnvironment.step": {
"total": 1381.4870977000069,
"count": 400923,
"is_parallel": true,
"self": 15.452664199902756,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 18.870952999977682,
"count": 400923,
"is_parallel": true,
"self": 18.870952999977682
},
"communicator.exchange": {
"total": 1219.6390342000182,
"count": 400923,
"is_parallel": true,
"self": 1219.6390342000182
},
"steps_from_proto": {
"total": 127.52444630010842,
"count": 400923,
"is_parallel": true,
"self": 25.686103800257825,
"children": {
"_process_maybe_compressed_observation": {
"total": 91.04513479994904,
"count": 801846,
"is_parallel": true,
"self": 7.155549800130132,
"children": {
"_observation_to_np_array": {
"total": 83.88958499981891,
"count": 1203327,
"is_parallel": true,
"self": 7.087501699546948,
"children": {
"process_pixels": {
"total": 76.80208330027196,
"count": 1203327,
"is_parallel": true,
"self": 36.39249940035934,
"children": {
"image_decompress": {
"total": 40.40958389991262,
"count": 1203327,
"is_parallel": true,
"self": 40.40958389991262
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 10.79320769990155,
"count": 801846,
"is_parallel": true,
"self": 10.79320769990155
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.819999983534217e-05,
"count": 1,
"self": 3.819999983534217e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3590.015360799979,
"count": 173307,
"is_parallel": true,
"self": 5.554849099913099,
"children": {
"process_trajectory": {
"total": 2830.8935339000654,
"count": 173307,
"is_parallel": true,
"self": 2830.4691806000656,
"children": {
"RLTrainer._checkpoint": {
"total": 0.42435329999989335,
"count": 2,
"is_parallel": true,
"self": 0.42435329999989335
}
}
},
"_update_policy": {
"total": 753.5669778000006,
"count": 140,
"is_parallel": true,
"self": 501.4560800999989,
"children": {
"TorchPPOOptimizer.update": {
"total": 252.11089770000166,
"count": 3372,
"is_parallel": true,
"self": 252.11089770000166
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1655068999998548,
"count": 1,
"self": 0.006168099999740662,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15933880000011413,
"count": 1,
"self": 0.15933880000011413
}
}
}
}
}
}
}