Dataset card metadata:
- Tasks: Visual Question Answering
- Formats: parquet
- Languages: English
- Size: 10K - 100K
- ArXiv: 2003.10286
- Tags: medical
- License: MIT
"""PathVQA: 30000+ Questions for Medical Visual Question Answering""" | |
import pandas | |
import os | |
import datasets | |
# BibTeX citation for the PathVQA paper (He et al., 2020, arXiv:2003.10286).
_CITATION = """\
@article{he2020pathvqa,
  title={PathVQA: 30000+ Questions for Medical Visual Question Answering},
  author={He, Xuehai and Zhang, Yichen and Mou, Luntian and Xing, Eric and Xie, Pengtao},
  journal={arXiv preprint arXiv:2003.10286},
  year={2020}
}
"""

# Human-readable summary surfaced through datasets.DatasetInfo / the dataset card.
_DESCRIPTION = """\
PathVQA is a dataset of question-answer pairs on pathology images. The questions are similar to those in the
American Board of Pathology (ABP) test. The dataset includes both open-ended questions and binary "yes/no"
questions. The dataset is built from two publicly-available pathology textbooks: "Textbook of Pathology" and
"Basic Pathology", and a publicly-available digital library: "Pathology Education Informational Resource"
(PEIR). The copyrights of images and captions belong to the publishers and authors of these two books,
and the owners of the PEIR digital library.
"""

_HOMEPAGE = "https://github.com/UCSD-AI4H/PathVQA"

_LICENSE = "MIT"

# Repository-relative paths, resolved by the DownloadManager: one tar archive of
# images and one JSON Lines question-answer file per split (train / val / test).
_URLS = {
    "image_train": "data/image/train_img.tar",
    "image_val": "data/image/val_img.tar",
    "image_test": "data/image/test_img.tar",
    "text_train": "data/text/train_qa.jsonl",
    "text_val": "data/text/val_qa.jsonl",
    "text_test": "data/text/test_qa.jsonl",
}
class PathVQA(datasets.GeneratorBasedBuilder):
    """
    PathVQA: 30000+ Questions for Medical Visual Question Answering.

    The data was obtained from the updated Google Drive link shared by the authors in their GitHub repository
    on Feb 15, 2023, see https://github.com/UCSD-AI4H/PathVQA/commit/117e7f4ef88a0e65b0e7f37b98a73d6237a3ceab.
    This version of the dataset contains a total of 5,004 images and 32,795 question-answer pairs. Of the
    5,004 images, 4,289 images are referenced by a question-answer pair, while 715 images are not used.
    Furthermore, there are several duplicates, i.e. there are some image-question-answer triplets which occur
    more than once in the same split (train, val, test). After dropping the duplicate image-question-answer
    triplets, the dataset contains 32,632 question-answer pairs on 4,289 images.
    """

    VERSION = datasets.Version("0.1.0")

    # Two flavors: the dataset exactly as published ("full") and one with exact
    # duplicate image-question-answer triplets dropped ("de-duped", the default).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full", version=VERSION, description="Original dataset."),
        datasets.BuilderConfig(name="de-duped", version=VERSION, description="De-duplicated dataset."),
    ]

    DEFAULT_CONFIG_NAME = "de-duped"

    def _info(self):
        """Return the dataset metadata: each example is an image with one
        free-text question and its answer."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "question": datasets.Value("string"),
                "answer": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download per-split image archives and QA files, and wire up the
        train / validation / test splits.

        Image tars are extracted to a directory; the .jsonl QA files are
        downloaded as-is and parsed later in ``_generate_examples``.
        """
        split_generators = []
        for split_enum, split_name in (
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "val"),
            (datasets.Split.TEST, "test"),
        ):
            image_dir = dl_manager.download_and_extract(_URLS[f"image_{split_name}"])
            text_file = dl_manager.download(_URLS[f"text_{split_name}"])
            split_generators.append(
                datasets.SplitGenerator(
                    name=split_enum,
                    gen_kwargs={
                        "image_filepath": image_dir,
                        "text_filepath": text_file,
                        "split": split_name,
                    },
                )
            )
        return split_generators

    def _generate_examples(self, image_filepath, text_filepath, split):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            image_filepath: directory containing the extracted split images.
            text_filepath: path to the split's JSON Lines QA file; each record
                has ``image``, ``question`` and ``answer`` fields.
            split: split name ("train" / "val" / "test"); unused here but part
                of the gen_kwargs contract.
        """
        df = pandas.read_json(text_filepath, orient="records", lines=True)
        if self.config.name == "de-duped":
            # Drop exact duplicate rows and re-index so the yielded keys stay
            # a unique, contiguous 0..n-1 range.
            df = df.drop_duplicates(ignore_index=True)
        for key, row in df.iterrows():
            yield key, {
                # Path is decoded lazily by the datasets.Image() feature.
                "image": os.path.join(image_filepath, row["image"]),
                "question": row["question"],
                "answer": row["answer"],
            }