# Copyright 2023 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""RVL-CDIP-N_mp (Ryerson Vision Lab Complex Document Information Processing) -New -Multipage dataset"""


import os
import datasets
from pathlib import Path
from tqdm import tqdm
import pdf2image

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_MODE = "binary"

_CITATION = """\
@inproceedings{larson2022evaluating,
	title={Evaluating Out-of-Distribution Performance on Document Image Classifiers},
	author={Larson, Stefan and Lim, Gordon and Ai, Yutong and Kuang, David and Leach, Kevin},
	booktitle={Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track},
	year={2022}
}
    
@inproceedings{bdpc,
    title = {Beyond Document Page Classification},
    author = {Anonymous},
    booktitle = {Under Review},
    year = {2023}
}
"""

_DESCRIPTION = """\
The RVL-CDIP-N (Ryerson Vision Lab Complex Document Information Processing) dataset consists of newly gathered documents in 16 classes 
There are 991 documents for testing purposes. There were 10 documents from the original dataset that could not be retrieved based on the metadata or were out-of-scope (language). 
"""


_HOMEPAGE = "https://www.cs.cmu.edu/~aharley/rvl-cdip/"
_LICENSE = "https://www.industrydocuments.ucsf.edu/help/copyright/"


SOURCE = "bdpc/rvl_cdip_n_mp"
_URL = f"https://huggingface.co/datasets/{SOURCE}/resolve/main/data.tar.gz"
_BACKOFF_folder = "/mnt/lerna/data/RVL-CDIP-NO/RVL-CDIP-N_pdf/data"

_CLASSES = [
    "letter",
    "form",
    "email",
    "handwritten",
    "advertisement",
    "scientific report",
    "scientific publication",
    "specification",
    "file folder",
    "news article",
    "budget",
    "invoice",
    "presentation",
    "questionnaire",
    "resume",
    "memo",
]


def batched_conversion(pdf_file):
    """Render a PDF into a list of PIL images, at most 10 pages at a time."""
    info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
    max_pages = info["Pages"]

    logger.info(f"{pdf_file} has {max_pages} pages")

    images = []

    # Convert in chunks of 10 pages to keep memory usage bounded for long documents.
    for page in range(1, max_pages + 1, 10):
        images.extend(
            pdf2image.convert_from_path(pdf_file, dpi=200, first_page=page, last_page=min(page + 10 - 1, max_pages))
        )
    return images
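
# A hedged usage sketch for batched_conversion; the path below is a placeholder, not a
# file shipped with this dataset, and the call is left commented out because the default
# "binary" mode never renders pages:
#
#   pages = batched_conversion("/path/to/some_document.pdf")
#   pages[0].save("first_page.png")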


def open_pdf_binary(pdf_file):
    """Read a PDF from disk and return its raw bytes."""
    with open(pdf_file, "rb") as f:
        return f.read()


class RvlCdipNMp(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        if isinstance(self.config.data_dir, str):
            folder = self.config.data_dir  # folder structure on the user's local disk
        else:
            folder = _URL if not os.path.exists(_BACKOFF_folder) else _BACKOFF_folder
        self.config.data_dir = folder

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "file": datasets.Value("binary"),
                    "labels": datasets.features.ClassLabel(names=_CLASSES),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        if self.config.data_dir.endswith(".tar.gz"):
            archive_path = dl_manager.download(self.config.data_dir)
            data_files = dl_manager.iter_archive(archive_path)
        else:
            data_files = self.config.data_dir

        return [datasets.SplitGenerator(name="test", gen_kwargs={"archive_path": data_files})]

    def generate_example(self, path, file=None):
        labels = self.info.features["labels"]
        extensions = {".pdf", ".PDF"}

        path = Path(path)  # ensure path is a pathlib object
        if path.suffix not in extensions:
            raise ValueError(f"Unsupported file type: {path}")

        if file is None:
            if _MODE == "binary":
                file = open_pdf_binary(path)
                # batched_conversion(path)
            else:
                file = path

        # The label is the name of the parent folder, e.g. "invoice/doc.pdf" -> "invoice".
        example = dict(
            id=path.name,
            file=file,
            labels=labels.encode_example(path.parent.name.lower()),
        )

        return path.name, example

    def _generate_examples(self, archive_path):
        if self.config.data_dir.endswith(".tar.gz"):
            iterator = archive_path  # (member_path, file_object) pairs from dl_manager.iter_archive
        else:
            iterator = Path(archive_path).glob("**/*")  # walk the local directory tree

        for path in tqdm(iterator, desc=f"{archive_path}"):
            file = None
            if isinstance(path, tuple):
                # iter_archive yields (member_path, file_object); read the member into bytes.
                path, file = path
                file = file.read()
            try:
                yield self.generate_example(path, file=file)
            except Exception as e:
                logger.warning(f"{e} failed to parse {path}")