trying to iter over full validation

DUDE_imdb_loader.py  (+117 -122)
@@ -27,11 +27,14 @@ from io import BytesIO
 tqdm.pandas()
 from joblib import Parallel, delayed
 
-
+import pdf2image
 import PyPDF2
 
-from datasets import load_dataset_builder, load_dataset
 from PIL import Image as PIL_Image
+from datasets import load_dataset_builder, load_dataset, logging
+
+
+logger = logging.get_logger(__name__)
 
 
 MAX_PAGES = 50
@@ -48,125 +51,88 @@ def save_json(json_path, data):
         json.dump(data, f)
 
 
-
-#
-
-def pdf_to_images(document_filepath):
+def get_images_pdf2image(document_filepath):
+    info = pdf2image.pdfinfo_from_path(document_filepath, userpw=None, poppler_path=None)
+    maxPages = info["Pages"]
+    maxPages = min(maxPages, maxPages)
+
+    # logger.info(f"{document_filepath} has {str(maxPages)} pages")
+    images = []
+    for page in range(1, maxPages + 1, 10):
+        images.extend(
+            pdf2image.convert_from_path(
+                document_filepath, first_page=page, last_page=min(page + 10 - 1, maxPages)
+            )
+        )
+    return images
+
+
+def pdf_to_images(document_filepath, converter="PyPDF2"):
     def images_to_pagenames(images, document_filepath, page_image_dir):
-        "/home/jordy/Downloads/DUDE_train-val-test_binaries/PDF/val/001d6f557c342ef5a67cd38a29da9e83.pdf"
-
         page_image_names = []
         for page_idx, page_image in enumerate(images):
             page_image_name = document_filepath.replace("PDF", "images").replace(
                 ".pdf", f"_{page_idx}.jpg"
             )
-
-            # page_image_names.append(page_image_name.replace(page_images_dir, "")) #without dir
-
+            page_image_names.append(page_image_name.replace(page_image_dir, ""))  # without dir
            if not os.path.exists(page_image_name):
-                page_image.save(page_image_name)
+                page_image.convert("RGB").save(page_image_name)
         return page_image_names
 
     example = {}
-    example["
-    example["pages"] = 0
+    example["num_pages"] = 0
     example["page_image_names"] = []
     images = []
 
-    page_image_dir = "/".join(
+    page_image_dir = "/".join(document_filepath.split("/")[:-1]).replace("PDF", "images")
     if not os.path.exists(page_image_dir):
         os.makedirs(page_image_dir)
 
-    # if len(
+    # if len(document_filepath) > MAX_PDF_SIZE:
     #     logger.warning(f"too large document {len(example['document'])}")
     #     return example
-    try:
-        reader = PyPDF2.PdfReader(example["document"])
-    except Exception as e:
-        logger.warning(f"read_pdf {e}")
-        return example
-
     reached_page_limit = False
-    page_iterator = reader.pages
-
-    for p, page in enumerate(page_iterator):
-        if reached_page_limit:
-            break
-        for image in page.images:
-            # try:
-            # except Exception as e:
-            #     logger.warning(f"get_images {e}")
-            if len(images) == MAX_PAGES:
-                reached_page_limit = True
-                break
-
-
-
-
-
 
+    if converter == "PyPDF2":
+        try:
+            reader = PyPDF2.PdfReader(document_filepath)
+        except Exception as e:
+            logger.warning(f"read_pdf {e}")
+            return example
+
+        for p, page in enumerate(reader.pages):
+            if reached_page_limit:
+                break
+            try:
+                for image in page.images:
+                    if len(images) == MAX_PAGES:
+                        reached_page_limit = True
+                        break
+                    im = PIL_Image.open(BytesIO(image.data))
+                    if im.width < MIN_WIDTH and im.height < MIN_HEIGHT:
+                        continue
+                    images.append(im)
+            except Exception as e:
+                logger.warning(f"get_images {e}")
+
+    elif converter == "pdf2image":
+        images = get_images_pdf2image(document_filepath)
+
+    example["num_pages"] = len(images)
     if len(images) == 0:
         return example
 
-    example["page_image_names"] = images_to_pagenames(images,
+    example["page_image_names"] = images_to_pagenames(images, document_filepath, page_image_dir)
 
     return example
 
 
-def pdf_to_images_block(document_paths_blocks):
+def pdf_to_images_block(document_paths_blocks, converter):
     new_doc_metadata = {}
     for document_filepath in document_paths_blocks:
         docId = document_filepath.split("/")[-1].replace(".pdf", "")
-        new_doc_metadata[docId] = pdf_to_images(document_filepath)
-    return new_doc_metadata
-
-
-"""
-def get_document_metadata(docs_metadata, docId, document_filepath):
-
-    if docId in docs_metadata and docs_metadata[docId]["num_pages"] != -1:
-        num_pages = docs_metadata[docId]["num_pages"]
-        page_image_names = docs_metadata[docId]["page_image_names"]
-
-    else:
-        try:
-            images = pdf2image.convert_from_path(document_filepath)
-        except:
-            print(docId)
-            return -1, -1
-        num_pages = len(images)
-        page_image_dir = ("/").join(
-            document_filepath.replace(documents_dir, page_images_dir).split("/")[:-1]
-        )
-        if not os.path.exists(page_image_dir):
-            os.makedirs(page_image_dir)
-
-        page_image_names = []
-        for page_idx, page_image in enumerate(images):
-            page_image_name = document_filepath.replace(documents_dir, page_images_dir).replace(
-                ".pdf", f"_{page_idx}.jpg"
-            )
-            page_image_names.append(page_image_name.replace(page_images_dir, ""))
-
-            if not os.path.exists(page_image_name):
-                page_image.save(page_image_name)
-
-    return num_pages, page_image_names
-
-
-def get_document_metadata_block(docs_metadata, documents_path_dict, documents_blocks):
-    new_doc_metadata = {}
-    for docId in documents_blocks:
-        document_filepath = documents_path_dict[docId]
-        num_pages, page_image_names = get_document_metadata(
-            docs_metadata, docId, document_filepath
-        )
-        new_doc_metadata[docId] = {"num_pages": num_pages, "page_image_names": page_image_names}
-
+        new_doc_metadata[docId] = pdf_to_images(document_filepath, converter=converter)
     return new_doc_metadata
-"""
 
 
 def parse_textract_bbox(box):
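The batched pdf2image conversion introduced above in get_images_pdf2image is easier to read outside the diff. Below is a minimal standalone sketch of the same pattern; the helper name convert_in_batches and the example path are hypothetical, and pdf2image assumes poppler is installed on the system:

import pdf2image

def convert_in_batches(document_filepath, batch_size=10):
    # Ask poppler for the page count first, then render in small batches
    # instead of materialising the whole PDF in memory at once.
    info = pdf2image.pdfinfo_from_path(document_filepath)
    max_pages = info["Pages"]

    images = []
    for first in range(1, max_pages + 1, batch_size):
        images.extend(
            pdf2image.convert_from_path(
                document_filepath,
                first_page=first,
                last_page=min(first + batch_size - 1, max_pages),
            )
        )
    return images

# Hypothetical path, following the PDF/<split>/<docId>.pdf layout used in this script.
# pages = convert_in_batches("/data/DUDE_train-val-test_binaries/PDF/val/example.pdf")

Rendering ten pages at a time bounds peak memory on long documents, which is presumably why the converter="pdf2image" path was added alongside the PyPDF2 one.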
@@ -195,7 +161,11 @@ def parse_azure_box(box, page_width, page_height):
 
 def get_ocr_information(ocr_path, num_pages):
     ocr_info = load_json(ocr_path)
-
+
+    ocr_pages = ocr_info[0]["DocumentMetadata"]["Pages"]
+
+    if num_pages != ocr_pages:
+        raise AssertionError("Pages from images and OCR not matching, should go for pdf2image")
 
     page_ocr_tokens = [[] for page_ix in range(num_pages)]
     page_ocr_boxes = [[] for page_ix in range(num_pages)]
@@ -203,16 +173,18 @@ def get_ocr_information(ocr_path, num_pages):
         for ocr_extraction in ocr_block["Blocks"]:
             if ocr_extraction["BlockType"] == "WORD":
                 text = ocr_extraction["Text"].lower()
-                bounding_box = parse_textract_bbox(
+                bounding_box = parse_textract_bbox(
+                    ocr_extraction["Geometry"]["BoundingBox"]
+                ).tolist()
                 page = ocr_extraction["Page"] - 1
 
                 page_ocr_tokens[page].append(text)
                 page_ocr_boxes[page].append(bounding_box)
 
+    """
     for page in range(num_pages):
         page_ocr_boxes[page] = np.array(page_ocr_boxes[page])
-
-    page_ocr_boxes = page_ocr_boxes
+    """
     return page_ocr_tokens, page_ocr_boxes
 
 
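The fields that get_ocr_information walks above can be mimicked with a tiny in-memory payload. The following is an illustrative toy only, built from the keys visible in this diff (DocumentMetadata.Pages and WORD blocks with Text, Page, Geometry.BoundingBox); it is not a complete Textract response:

# Toy OCR payload with a single word on a single page.
ocr_info = [
    {
        "DocumentMetadata": {"Pages": 1},
        "Blocks": [
            {
                "BlockType": "WORD",
                "Text": "Hello",
                "Page": 1,
                "Geometry": {
                    "BoundingBox": {"Left": 0.1, "Top": 0.2, "Width": 0.3, "Height": 0.05}
                },
            }
        ],
    }
]

num_pages = ocr_info[0]["DocumentMetadata"]["Pages"]
page_ocr_tokens = [[] for _ in range(num_pages)]
page_ocr_boxes = [[] for _ in range(num_pages)]

for ocr_block in ocr_info:
    for ocr_extraction in ocr_block["Blocks"]:
        if ocr_extraction["BlockType"] == "WORD":
            page = ocr_extraction["Page"] - 1
            box = ocr_extraction["Geometry"]["BoundingBox"]
            page_ocr_tokens[page].append(ocr_extraction["Text"].lower())
            # parse_textract_bbox in the real script converts this dict to an array;
            # the conversion is inlined here for illustration.
            page_ocr_boxes[page].append([box["Left"], box["Top"], box["Width"], box["Height"]])

print(page_ocr_tokens)  # [['hello']]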
@@ -240,13 +212,21 @@ def format_answers(answers_list):
 
 
 def create_imdb_record_from_json(
-    record, documents_metadata,
+    record, documents_metadata, documents_ocr_info, split, include_answers
 ):
 
-    docId = record["docId"]
+    docId = record["docId"].split("_")[0]
     # document_filepath = documents_dict[docId]
-
-
+    try:
+        num_pages, page_image_names = get_document_info(documents_metadata, docId)
+        document_ocr_info = documents_ocr_info[docId]
+    except Exception as e:
+        print(
+            "Missing: ",
+            e,
+            docId,
+        )
+        return {}
 
     if include_answers:
         answers = format_answers(record["answers"])
@@ -276,17 +256,17 @@ def create_imdb_record_from_json(
 
 
 def create_imdb_from_json(
-    data, documents_metadata,
+    data, documents_metadata, documents_ocr_info, split, version, include_answers=True
 ):
     imdb_header = create_header(split, version, include_answers)
 
     imdb_records = []
     for record in tqdm(data):
-
-
-            record, documents_metadata, documents_ocr_information, split, include_answers
-        )
+        imdb_record = create_imdb_record_from_json(
+            record, documents_metadata, documents_ocr_info, split, include_answers
         )
+        if imdb_record:
+            imdb_records.append(imdb_record)
 
     imdb = [imdb_header] + imdb_records
 
@@ -299,7 +279,7 @@ if __name__ == "__main__":
         "DUDE",
         data_dir="/home/jordy/Downloads/DUDE_train-val-test_binaries",
     )
-    splits =
+    splits = dataset.keys()
 
     for split in splits:
         if split != "val":
@@ -308,7 +288,7 @@ if __name__ == "__main__":
         split_indices = []
         OCR_paths = []
         document_paths = []
-        for i, x in enumerate(dataset):
+        for i, x in enumerate(dataset[split]):
            if x["data_split"] != split:
                continue
            if x["document"] not in document_paths:
@@ -316,27 +296,29 @@ if __name__ == "__main__":
                OCR_paths.append(x["OCR"])
                split_indices.append(i)
 
-        document_paths = document_paths[:30]
-        OCR_paths = OCR_paths[:30]
+        # document_paths = document_paths[:30]
+        # OCR_paths = OCR_paths[:30]
 
         # 1. PDF to image dir and collect document metadata (num_pages, page_image_names)
         documents_metadata_filename = f"{split}-documents_metadata.json"
         if os.path.exists(documents_metadata_filename):
+            print(f"Loading from disk: {documents_metadata_filename}")
             documents_metadata = load_json(documents_metadata_filename)
         else:
            documents_metadata = {}
-            num_jobs =
+            num_jobs = 1
            block_size = int(len(document_paths) / num_jobs) + 1
            print(f"{block_size} * {num_jobs} = {block_size*num_jobs} ({len(document_paths)})")
-
-                document_paths[block_size * i : block_size * i + block_size]
+            document_blocks = [
+                document_paths[block_size * i : block_size * i + block_size]
+                for i in range(num_jobs)
            ]
            print(
                "chunksize",
-                len(set([docId for doc_block in
+                len(set([docId for doc_block in document_blocks for docId in doc_block])),
            )
            parallel_results = Parallel(n_jobs=num_jobs)(
-                delayed(
+                delayed(pdf_to_images_block)(document_blocks[i], "pdf2image")
                for i in range(num_jobs)
            )
 
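The block-splitting plus joblib pattern used above is shown below in isolation with toy inputs; process_block stands in for pdf_to_images_block, and the paths and num_jobs values are made up:

from joblib import Parallel, delayed

def process_block(paths):
    # Stand-in worker: map docId -> some per-document result.
    return {p.split("/")[-1].replace(".pdf", ""): len(p) for p in paths}

document_paths = [f"/data/PDF/val/doc_{i}.pdf" for i in range(7)]  # toy inputs
num_jobs = 3
block_size = int(len(document_paths) / num_jobs) + 1
document_blocks = [
    document_paths[block_size * i : block_size * i + block_size] for i in range(num_jobs)
]

results = Parallel(n_jobs=num_jobs)(
    delayed(process_block)(document_blocks[i]) for i in range(num_jobs)
)

# Each job returns a partial dict; merging them mirrors how parallel_results
# would be folded back into documents_metadata.
merged = {}
for partial in results:
    merged.update(partial)
print(len(merged))  # 7

The merge of parallel_results into documents_metadata happens in unchanged code that this diff does not show.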
@@ -350,7 +332,8 @@ if __name__ == "__main__":
         # 2. Process OCR to obtain doc_ocr_info
         documents_ocr_filename = f"{split}-documents_ocr.json"
 
-        if os.path.exists(documents_ocr_filename):
+        if os.path.exists(documents_ocr_filename) and False:
+            print(f"Loading from disk: {documents_ocr_filename}")
             documents_ocr_info = load_json(documents_ocr_filename)
         else:
             documents_ocr_info = {}
@@ -359,27 +342,39 @@ if __name__ == "__main__":
 
         for i, document_filepath in enumerate(document_paths):
            docId = document_filepath.split("/")[-1].replace(".pdf", "")
-
-
-
-
-
-
-
+            try:
+                ocr_tokens, ocr_boxes = get_ocr_information(
+                    OCR_paths[i], documents_metadata[docId]["num_pages"]
+                )
+                documents_ocr_info[docId] = {"ocr_tokens": ocr_tokens, "ocr_boxes": ocr_boxes}
+            except AssertionError as e:
+                print(f"image2pages issue: {e}")
+                error_ocr.append(docId)
+            except IndexError as e:
+                print(f"pages issue: {e}")
+                error_ocr.append(docId)
+            except FileNotFoundError:
+                print(f"FileNotFoundError issue: {e}")
+                no_ocr.append(docId)
+            except KeyError:
+                print(f"Keyerror issue: {e}")
+                error_ocr.append(docId)
 
         save_json(documents_ocr_filename, documents_ocr_info)
 
         imdb = create_imdb_from_json(
-            dataset
+            dataset[split],  # .select(split_indices),
            documents_metadata=documents_metadata,
-
+            documents_ocr_info=documents_ocr_info,
            split=split,
            version="0.1",
            include_answers=True,
        )
-        np.save(f"{split}_imdb.npy", imdb)
+        np.save(f"{split}_imdb.npy", imdb)  # dump to lerna
+
+        import pdb
 
-
+        pdb.set_trace()  # breakpoint 930f4f6a //
 
        # page_image_dir = '/'.join(dataset['val']['document'][0].split("/")[:-1]).replace('PDF', 'images')
        # if not os.path.exists(page_image_dir):
@@ -411,7 +406,7 @@ if __name__ == "__main__":
     train_imdb = create_imdb_from_json(
         train_data,
         documents_metadata=documents_metadata,
-
+        documents_ocr_info=doc_ocr_info,
         split="train",
         version="0.1",
         include_answers=True,