"""Hugging Face `datasets` loading script that reads Parquet files with PyArrow."""

import datasets
import pyarrow as pa
import pyarrow.parquet as pq

logger = datasets.utils.logging.get_logger(__name__)

_URLS = {
    "train": "https://huggingface.co/datasets/moska/test_parquet/resolve/main/data/example.parquet"
}

class ParquetDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for ParquetDataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for ParquetDataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)

class ParquetDataset(datasets.ArrowBasedBuilder):
    BUILDER_CONFIGS = [
        ParquetDatasetConfig(
            name="parquet",
            version=datasets.Version("0.0.1"),
            description="test_parquet dataset.",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description="reading parquet format.",
            # This defines the different columns of the dataset and their types
            features=datasets.Features(
                {
                    "pop_est": datasets.Value(dtype="float64"),
                    "continent": datasets.Value(dtype="string"),
                    "name": datasets.Value(dtype="string"),
                    "iso_a3": datasets.Value(dtype="string"),
                    "gdp_md_est": datasets.Value(dtype="int64"),
                    # Geometries are stored as raw bytes.
                    "geometry": datasets.Value(dtype="binary"),
                }
            ),
        )


    def _split_generators(self, dl_manager: datasets.download.DownloadManager):
        # Download every URL in _URLS; `download` returns local paths under the same keys.
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # The keys of `gen_kwargs` must match the parameter names of `_generate_tables`.
                gen_kwargs={"filepaths": [downloaded_files["train"]]},
            )
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_tables(self, filepaths: list[str]):
        for file_idx, filepath in enumerate(filepaths):
            with open(filepath, mode="rb") as f:
                parquet_file = pq.ParquetFile(f)
                # Stream record batches so large files never need to fit in memory at once.
                for batch_idx, record_batch in enumerate(parquet_file.iter_batches()):
                    pa_table = pa.Table.from_batches([record_batch])
                    # Each yielded key must be unique across the whole split.
                    yield f"{file_idx}_{batch_idx}", pa_table