"""DUDE dataset loader""" |
|
|
|
import copy |
|
import json |
|
import os |
|
from typing import List, Literal |
|
|
|
import datasets |
|
import pdf2image |
|
from tqdm import tqdm |

_CITATION = """
@inproceedings{dude2023icdar,
    title={ICDAR 2023 Challenge on Document UnderstanDing of Everything (DUDE)},
    author={Van Landeghem, Jordy et al.},
    booktitle={Proceedings of the ICDAR},
    year={2023}
}
"""

_DESCRIPTION = """\
DUDE requires models to reason about and understand document layouts in multi-page images/PDFs to answer questions about them.
Specifically, models need to incorporate the new modality of layout present in the images/PDFs and reason
over it to answer DUDE questions.
"""

_HOMEPAGE = "https://rrc.cvc.uab.es/?ch=23"

_LICENSE = "CC BY 4.0"

_SPLITS = ["train", "val", "test"]

_URLS = {
    "binaries": "https://huggingface.co/datasets/jordyvl/DUDE_loader/resolve/main/data/DUDE_train-val-test_binaries.tar.gz",
    "annotations": "https://zenodo.org/record/7763635/files/2023-03-23_DUDE_gt_test_PUBLIC.json?download=1",
}

SKIP_DOC_IDS = [
    "nan",
    "ef03364aa27a0987c9870472e312aceb",
    "5c5a5880e6a73b4be2315d506ab0b15b",
]
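# Doc IDs listed above are skipped during example generation; presumably these
# correspond to annotation entries without a usable PDF in the released binaries.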


def parse_bbox(bbox):
    if bbox in [[], [[]]]:
        return None

    answers_page_bounding_boxes = []

    if isinstance(bbox[0], list):
        bbox = bbox[0]

    keys = ["left", "top", "width", "height", "page"]

    for page_bb in bbox:
        if len(page_bb) == 0:
            continue
        page_bb = {key: page_bb[key] for key in keys}
        answers_page_bounding_boxes.append(page_bb)
    return answers_page_bounding_boxes
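
# Illustrative call with a made-up box (keys beyond the five retained ones are dropped):
#   parse_bbox([{"left": 10, "top": 20, "width": 100, "height": 15, "page": 0, "score": 1}])
#   -> [{"left": 10, "top": 20, "width": 100, "height": 15, "page": 0}]
# Empty inputs ([] or [[]]) return None.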


def batched_conversion(pdf_file):
    info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
    maxPages = info["Pages"]

    images = []

    for page in range(1, maxPages + 1, 10):
        images.extend(
            pdf2image.convert_from_path(
                pdf_file,
                dpi=200,
                first_page=page,
                last_page=min(page + 10 - 1, maxPages),
            )
        )
    return images
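
# Not called by the loader itself; converting in batches of 10 pages keeps memory
# bounded for long PDFs, e.g. (hypothetical path):
#   images = batched_conversion("/path/to/document.pdf")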


def open_pdf_binary(pdf_file):
    with open(pdf_file, "rb") as f:
        return f.read()


class DUDEConfig(datasets.BuilderConfig):
    """BuilderConfig for DUDE."""

    def __init__(
        self,
        binary_mode: bool = False,
        ocr_engine: Literal["Azure", "Amazon", "Tesseract"] = "Amazon",
        format: Literal["original", "due"] = "original",
        **kwargs,
    ):
        """BuilderConfig for DUDE.

        Args:
            binary_mode: `boolean`, load binary PDFs/OCR or pass along paths on the local file system
            ocr_engine: OCR provider whose output to load ("Azure", "Amazon", or "Tesseract")
            format: OCR annotation format, either "original" or "due"
            **kwargs: keyword arguments forwarded to super.
        """
        super(DUDEConfig, self).__init__(description=_DESCRIPTION, **kwargs)
        self.binary_mode = binary_mode
        self.ocr_engine = ocr_engine
        self.format = format


def builder_configs(version):
    configurations = []
    for binary_mode in [True, False]:
        for ocr_engine in ["Azure", "Amazon", "Tesseract"]:
            for format in ["original", "due"]:
                binary_name = "bin_" if binary_mode else ""
                configurations.append(
                    DUDEConfig(
                        name=f"{binary_name}{ocr_engine}_{format}",
                        version=version,
                        binary_mode=binary_mode,
                        ocr_engine=ocr_engine,
                        format=format,
                    )
                )
    return configurations
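
# The cross product above yields twelve configs, e.g. "Amazon_original"
# (local paths, Amazon OCR, original format) or "bin_Tesseract_due"
# (binary mode, Tesseract OCR, DUE format).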


class DUDE(datasets.GeneratorBasedBuilder):
    """DUDE dataset."""

    VERSION = datasets.Version("1.0.7")

    BUILDER_CONFIGS = builder_configs(VERSION)

    DEFAULT_CONFIG_NAME = "Amazon_original"

    def _info(self):
        features = datasets.Features(
            {
                "docId": datasets.Value("string"),
                "questionId": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": datasets.Sequence(datasets.Value("string")),
                "answers_page_bounding_boxes": datasets.Sequence(
                    {
                        "left": datasets.Value("int32"),
                        "top": datasets.Value("int32"),
                        "width": datasets.Value("int32"),
                        "height": datasets.Value("int32"),
                        "page": datasets.Value("int32"),
                    }
                ),
                "answers_variants": datasets.Sequence(datasets.Value("string")),
                "answer_type": datasets.Value("string"),
                "data_split": datasets.Value("string"),
                "document": datasets.Value("binary")
                if self.config.binary_mode
                else datasets.Value("string"),
                "OCR": datasets.Value("binary")
                if self.config.binary_mode
                else datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        if "blind" in _URLS and os.path.exists(_URLS["blind"]):
            annotations = json.load(open(_URLS["blind"], "r"))
        else:
            annotations_path = dl_manager.download(_URLS["annotations"])
            annotations = json.load(open(annotations_path, "r"))

        if self.config.data_dir:
            binary_extraction_path = self.config.data_dir
        else:
            binaries_path = dl_manager.download(_URLS["binaries"])
            binary_extraction_path = dl_manager.extract(binaries_path)

        splits = []
        for split in _SPLITS:
            splits.append(
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "binary_extraction_path": binary_extraction_path,
                        "annotations": annotations,
                        "split": split,
                    },
                )
            )
        return splits

    def _generate_examples(self, binary_extraction_path, annotations, split):
        def retrieve_doc(docid):
            extracted_path = os.path.join(
                binary_extraction_path, "PDF", split, docid + ".pdf"
            )
            return extracted_path

        def retrieve_OCR(docid, ocr_engine="Amazon", format="original"):
            extracted_path = os.path.join(
                binary_extraction_path, "OCR", ocr_engine, docid + f"_{format}.json"
            )
            return extracted_path

        split_condition = (
            lambda x, split: bool(x["data_split"] == split)
            if split in ["train", "val"]
            else bool(split in x["data_split"])
        )
        annotations = [x for x in annotations if split_condition(x, split)]

        for i, a in enumerate(annotations):
            if a["docId"] in SKIP_DOC_IDS:
                continue
            a = dict(a)
            a["data_split"] = split
            if "answers" not in a:
                a["answers"] = None
                a["answers_variants"] = None
                a["answer_type"] = None
                a["answers_page_bounding_boxes"] = None
            else:
                a["answers_page_bounding_boxes"] = parse_bbox(
                    a.get("answers_page_bounding_boxes", [])
                )
            docpath = retrieve_doc(a["docId"])
            ocrpath = retrieve_OCR(
                a["docId"],
                ocr_engine=self.config.ocr_engine,
                format=self.config.format,
            )
            if self.config.binary_mode:
                with open(docpath, "rb") as f, open(ocrpath, "rb") as g:
                    a["document"] = f.read()
                    a["OCR"] = g.read()
            else:
                a["document"] = docpath
                a["OCR"] = ocrpath
            yield i, a
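

if __name__ == "__main__":
    # Minimal loading sketch, assuming a `datasets` version that still supports
    # script-based loading; the local script path below is illustrative only.
    dataset = datasets.load_dataset("./DUDE_loader.py", "Amazon_original")
    print(dataset)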