small edit to imdb loader

DUDE_imdb_loader.py  (+24 -12)
```diff
@@ -36,8 +36,7 @@ from datasets import load_dataset_builder, load_dataset, logging
 
 logger = logging.get_logger(__name__)
 
-
-PIL.Image.MAX_IMAGE_PIXELS = None #933120000
+PIL_Image.MAX_IMAGE_PIXELS = None #933120000
 
 MAX_PAGES = 50
 MAX_PDF_SIZE = 100000000 # almost 100MB
```
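The renamed attribute access only works if the module is imported under that alias, which this hunk does not show. Assuming `from PIL import Image as PIL_Image`, the setting disables Pillow's decompression-bomb guard so very large page scans can be opened:

```python
from PIL import Image as PIL_Image  # assumed import; not shown in this diff

# None removes Pillow's pixel-count ceiling entirely, so oversized scans
# no longer raise DecompressionBombError; the commented value (933120000)
# would instead just raise the ceiling to ~933 megapixels.
PIL_Image.MAX_IMAGE_PIXELS = None
```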
```diff
@@ -53,19 +52,22 @@ def save_json(json_path, data):
         json.dump(data, f)
 
 
-def get_images_pdf2image(document_filepath):
+def get_images_pdf2image(document_filepath, chunksize=10):
     info = pdf2image.pdfinfo_from_path(document_filepath, userpw=None, poppler_path=None)
     maxPages = info["Pages"]
     maxPages = min(maxPages, maxPages)
 
     # logger.info(f"{document_filepath} has {str(maxPages)} pages")
     images = []
-    for page in range(1, maxPages + 1, 10):
-        images.extend(
-            pdf2image.convert_from_path(
-                document_filepath, first_page=page, last_page=min(page + 10 - 1, maxPages)
+    for page in range(1, maxPages + 1, chunksize):
+        try:
+            images.extend(
+                pdf2image.convert_from_path(
+                    document_filepath, first_page=page, last_page=min(page + chunksize - 1, maxPages)
+                )
             )
-        )
+        except Exception as e:
+            logger.warning(f"page: {page} get_images {e}")
     return images
 
 
```
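`pdf2image.convert_from_path` renders the requested page range through poppler and returns a list of `PIL.Image` objects, so converting in `chunksize`-page batches bounds peak memory on long documents, and the new `try/except` turns a corrupt range into a logged warning instead of a crash. (Note that the untouched context line `maxPages = min(maxPages, maxPages)` is a no-op; `min(MAX_PAGES, maxPages)` was presumably intended, given the `MAX_PAGES = 50` constant above.) A hypothetical call, with a made-up path:

```python
# Hypothetical usage of the patched helper; the path is made up.
images = get_images_pdf2image("PDF/val/0d53f.pdf", chunksize=10)

# A failed chunk is skipped with a warning, so fewer images than pages
# is possible here.
print(f"rendered {len(images)} pages")
```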
```diff
@@ -76,7 +78,7 @@ def pdf_to_images(document_filepath, converter="PyPDF2"):
         page_image_name = document_filepath.replace("PDF", "images").replace(
             ".pdf", f"_{page_idx}.jpg"
         )
-        page_image_names.append(page_image_name.replace(page_image_dir, ""))  # without dir
+        page_image_names.append(page_image_name.replace(page_image_dir, page_image_dir.split('/')[-1])) # without dir
         if not os.path.exists(page_image_name):
             page_image.convert("RGB").save(page_image_name)
     return page_image_names
```
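The effect of the changed `replace()` call, with a made-up directory layout: the old form stripped the directory entirely and left a leading `/`, which is exactly the pattern the later "fix if wrongly saved pagenames" hunk looks for.

```python
# Made-up paths, to illustrate the changed replace():
page_image_dir = "/data/DUDE/images/val"
page_image_name = "/data/DUDE/images/val/abc123_0.jpg"

old_style = page_image_name.replace(page_image_dir, "")
new_style = page_image_name.replace(page_image_dir, page_image_dir.split('/')[-1])

print(old_style)  # '/abc123_0.jpg'    -> leading '/', the "wrongly saved" form
print(new_style)  # 'val/abc123_0.jpg' -> relative to the split directory
```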
```diff
@@ -214,7 +216,7 @@ def format_answers(answers_list):
 
 
 def create_imdb_record_from_json(
-    record, documents_metadata, documents_ocr_info, split, include_answers
+    record, documents_metadata, documents_ocr_info, split, include_answers, include_variants=False
 ):
 
     docId = record["docId"].split("_")[0]
```
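Since the new flag defaults to `False`, existing call sites are unaffected and variant handling is strictly opt-in; a call would look like this (argument values are placeholders):

```python
# Placeholder arguments; only the new keyword is the point here.
imdb_record = create_imdb_record_from_json(
    record, documents_metadata, documents_ocr_info,
    split="val", include_answers=True, include_variants=False,
)
```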
```diff
@@ -235,6 +237,13 @@ def create_imdb_record_from_json(
     else:
         answers = None
 
+    if include_variants:
+        import pdb; pdb.set_trace() # breakpoint 03e74e0e //
+
+
+    #fix if wrongly saved pagenames
+    page_image_names = [split+image_name for image_name in page_image_names if image_name.startswith('/')]
+
     imdb_record = {
         "question_id": record["questionId"],
         "question": record["question"],
```
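One caveat: the comprehension's `if` clause filters rather than passes through, so page names that are already correct (no leading `/`) are dropped from the list altogether. If the intent is to repair only the wrongly saved names, a conditional expression keeps the rest; a sketch, assuming that intent:

```python
# Sketch, assuming the intent is "repair bad names, keep good ones":
page_image_names = [
    split + name if name.startswith('/') else name
    for name in page_image_names
]
```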
```diff
@@ -361,10 +370,12 @@ if __name__ == "__main__":
 
         save_json(documents_ocr_filename, documents_ocr_info)
 
-        imdb_filename = f"{split}
+        imdb_filename = f"imdb_{split}.npy"
         if os.path.exists(imdb_filename):
             print(f"Loading from disk: {imdb_filename}")
-            imdb = np.load(imdb_filename)
+            imdb = np.load(imdb_filename, allow_pickle=True)
+
+            import pdb; pdb.set_trace() # breakpoint ff49174b //
 
         else:
             imdb = create_imdb_from_json(
```
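The `allow_pickle=True` is required here because the imdb records are Python dicts: `np.save` stores them as an object array, and since NumPy 1.16.3 `np.load` refuses object arrays by default with a `ValueError`. A toy round-trip:

```python
import numpy as np

# Toy record mirroring the imdb structure; dicts force dtype=object.
records = np.array([{"question_id": "q1", "question": "What is shown?"}], dtype=object)
np.save("imdb_toy.npy", records)

imdb = np.load("imdb_toy.npy", allow_pickle=True)  # ValueError without allow_pickle
print(imdb[0]["question"])
```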
```diff
@@ -376,3 +387,4 @@ if __name__ == "__main__":
                 include_answers=True,
             )
             np.save(imdb_filename, imdb)
+            #Missing: 'a7b500f3e0244a50571769aaef4fabc7' a7b500f3e0244a50571769aaef4fabc7
```