|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
import csv |
|
import json |
|
import os |
|
import numpy as np |
|
import datasets |
|
from datasets import Value |
|
|
|
|
|
_CITATION = """\ |
|
@InProceedings{huggingface:dataset, |
|
title = {A great new dataset}, |
|
author={huggingface, Inc. |
|
}, |
|
year={2020} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
This new dataset is designed to solve this great NLP task and is crafted with a lot of care. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "" |
|
|
|
|
|
_LICENSE = "" |
|
|
|
norm_values = { |
|
'B01': {'mean': 0.12478869, 'std': 0.024433358, 'min': 1e-04, 'max': 1.8808, 'p1': 0.0787, 'p99': 0.1946}, |
|
'B02': {'mean': 0.13480005, 'std': 0.02822557, 'min': 1e-04, 'max': 2.1776, 'p1': 0.0925, 'p99': 0.2216}, |
|
'B03': {'mean': 0.16031432, 'std': 0.032037303, 'min': 1e-04, 'max': 2.12, 'p1': 0.1035, 'p99': 0.2556}, |
|
'B04': {'mean': 0.1532097, 'std': 0.038628064, 'min': 1e-04, 'max': 2.0032, 'p1': 0.1023, 'p99': 0.2816}, |
|
'B05': {'mean': 0.20312776, 'std': 0.04205057, 'min': 0.0422, 'max': 1.7502, 'p1': 0.1178, 'p99': 0.319}, |
|
'B06': {'mean': 0.32636437, 'std': 0.07139242, 'min': 0.0502, 'max': 1.7245, 'p1': 0.1633, 'p99': 0.519}, |
|
'B07': {'mean': 0.36605212, 'std': 0.08555025, 'min': 0.0616, 'max': 1.7149, 'p1': 0.1776, 'p99': 0.6076}, |
|
'B08': {'mean': 0.3811653, 'std': 0.092815965, 'min': 1e-04, 'max': 1.7488, 'p1': 0.1691, 'p99': 0.646}, |
|
'B8A': {'mean': 0.3910436, 'std': 0.0896364, 'min': 0.055, 'max': 1.688, 'p1': 0.1871, 'p99': 0.6386}, |
|
'B09': {'mean': 0.3910644, 'std': 0.0836445, 'min': 0.0012, 'max': 1.7915, 'p1': 0.2124, 'p99': 0.6241}, |
|
'B11': {'mean': 0.2917373, 'std': 0.07472579, 'min': 0.0953, 'max': 1.648, 'p1': 0.1334, 'p99': 0.4827}, |
|
'B12': {'mean': 0.21169408, 'std': 0.05880649, 'min': 0.0975, 'max': 1.6775, 'p1': 0.115, 'p99': 0.3872}} |
|
|
|
feature_dtype = {'s2_num_days': Value('int16'), |
|
'gedi_num_days': Value('uint16'), |
|
'lat': Value('float32'), |
|
'lon': Value('float32'), |
|
"agbd_se": Value('float32'), |
|
"elev_lowes": Value('float32'), |
|
"leaf_off_f": Value('uint8'), |
|
"pft_class": Value('uint8'), |
|
"region_cla": Value('uint8'), |
|
"rh98": Value('float32'), |
|
"sensitivity": Value('float32'), |
|
"solar_elev": Value('float32'), |
|
"urban_prop":Value('uint8')} |
|
|
|
class NewDataset(datasets.GeneratorBasedBuilder):
    """Builder for the AGBD dataset, backed by the `prs-eth/AGBD_raw` dataset.

    Each example is a center-cropped Sentinel-2 patch (`input`) with a float
    regression target (`label`); metadata fields listed in
    `additional_features` are passed through from the raw dataset unchanged.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION, description="Normalized data"),
        datasets.BuilderConfig(name="unnormalized", version=VERSION, description="Unnormalized data"),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def __init__(self, *args, additional_features=None, normalize_data=True, patch_size=15, **kwargs):
        """Configure patch cropping and the extra metadata fields to expose.

        Args:
            additional_features: iterable of metadata field names (keys of
                `feature_dtype`) to include in every example. Defaults to no
                extra fields. (Original used a mutable `[]` default, which is
                shared across all instances — fixed with a `None` sentinel.)
            normalize_data: stored on the instance but not read anywhere in
                this file; the normalized/unnormalized choice is driven by the
                builder config name. NOTE(review): confirm external callers
                actually use this attribute.
            patch_size: side length of the square center crop applied to each
                input patch.
        """
        self.inner_dataset_kwargs = kwargs
        self._is_streaming = False
        self.patch_size = patch_size
        self.normalize_data = normalize_data
        # Copy into a fresh list so a caller-owned iterable is never shared.
        self.additional_features = list(additional_features) if additional_features is not None else []
        super().__init__(*args, **kwargs)

    def as_streaming_dataset(self, split=None, base_path=None):
        """Flag streaming mode so `_split_generators` streams the raw dataset."""
        self._is_streaming = True
        return super().as_streaming_dataset(split=split, base_path=base_path)

    def _info(self):
        """Declare the example schema: a 3-D float `input`, a float `label`,
        and any requested metadata fields (typed via `feature_dtype`)."""
        all_features = {
            # 3-D patch; presumably (bands, height, width) given the crop in
            # `_generate_examples` — TODO confirm axis order with the raw data.
            'input': datasets.Sequence(datasets.Sequence(datasets.Sequence(datasets.Value('float32')))),
            'label': Value('float32'),
        }
        for feat in self.additional_features:
            all_features[feat] = feature_dtype[feat]

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(all_features),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def denormalize_s2(self, patch):
        """Undo p1/p99 min-max normalization on the RGB channels, in place.

        Channels 3, 2 and 1 of `patch` hold bands B04, B03 and B02; each is
        mapped back with ``v * (p99 - p1) + p1`` using `norm_values`. The
        patch is mutated and also returned for convenience.
        """
        restored = []
        for band, channel in zip(('B04', 'B03', 'B02'), (patch[3], patch[2], patch[1])):
            p1, p99 = norm_values[band]['p1'], norm_values[band]['p99']
            restored.append((p99 - p1) * channel + p1)
        patch[3], patch[2], patch[1] = restored
        return patch

    def _split_generators(self, dl_manager):
        """Load the raw dataset (streaming if requested via
        `as_streaming_dataset`) and expose the train/val/test splits."""
        self.original_dataset = datasets.load_dataset("prs-eth/AGBD_raw", streaming=self._is_streaming)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"split": "val"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test"}),
        ]

    def _generate_examples(self, split):
        """Yield ``(index, example)`` pairs for `split`, center-cropping each
        patch to `patch_size` and attaching requested metadata fields.

        Raises:
            ValueError: if the builder config name is unrecognized. (The
                original code left `data` undefined in that case and would
                have crashed with a confusing `NameError`.)
        """
        for i, d in enumerate(self.original_dataset[split]):
            if self.config.name == "default":
                patch = np.asarray(d["input"])
            elif self.config.name == "unnormalized":
                # Work on a copy: `denormalize_s2` mutates its argument.
                patch = np.asarray(self.denormalize_s2(np.array(d["input"])))
            else:
                raise ValueError(f"Unsupported config name: {self.config.name!r}")

            # Center crop along the two spatial axes; channel axis untouched.
            start_x = (patch.shape[1] - self.patch_size) // 2
            start_y = (patch.shape[2] - self.patch_size) // 2
            data = {
                'input': patch[:, start_x:start_x + self.patch_size, start_y:start_y + self.patch_size],
                'label': d["label"],
            }

            for feat in self.additional_features:
                data[feat] = d["metadata"][feat]

            yield i, data
|
|
|
|
|
|