test_parquet.py
import datasets
import pyarrow as pa
import pyarrow.parquet as pq

logger = datasets.utils.logging.get_logger(__name__)

_URLS = {"train": "https://huggingface.co/datasets/moska/test_parquet/resolve/main/data/example.parquet"}


class ParquetDataset(datasets.ArrowBasedBuilder):
BUILDER_CONFIGS = [
datasets.BuilderConfig(
version=datasets.Version(version="0.0.1"),
description=f"test_parquet dataset.",
)
]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description="Test dataset for reading the Parquet format.",
            # This defines the columns of the dataset and their types.
            features=datasets.Features(
                {
                    "pop_est": datasets.Value(dtype="float64"),
                    "continent": datasets.Value(dtype="string"),
                    "name": datasets.Value(dtype="string"),
                    "iso_a3": datasets.Value(dtype="string"),
                    "gdp_md_est": datasets.Value(dtype="int64"),
                    # Geometries are stored as raw bytes (e.g. WKB-encoded shapes).
                    "geometry": datasets.Value("binary"),
                }
            ),
)

    def _split_generators(self, dl_manager: datasets.download.DownloadManager):
        # Download all URLs; `download` returns a dict with the same keys as _URLS.
        # (Indexing _URLS by self.config.name would raise a KeyError, since the
        # only config is named "default" while the dict key is "train".)
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # The key must match the `filepaths` parameter of `_generate_tables`.
                gen_kwargs={"filepaths": [downloaded_files["train"]]},
            )
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_tables(self, filepaths: list[str]):
        for file_idx, filepath in enumerate(filepaths):
            with open(filepath, mode="rb") as f:
                # Read from the opened handle rather than re-opening the path.
                parquet_file = pq.ParquetFile(f)
                # Stream the file in record batches and yield each as an Arrow table,
                # keyed by file and batch index so keys stay unique across files.
                for batch_idx, record_batch in enumerate(parquet_file.iter_batches()):
                    pa_table = pa.Table.from_batches([record_batch])
                    yield f"{file_idx}_{batch_idx}", pa_table