holylovenia committed
Commit 07206cc
1 Parent(s): 0aef2e5

Upload openvivqa.py with huggingface_hub

Files changed (1)
  1. openvivqa.py +162 -0
openvivqa.py ADDED
@@ -0,0 +1,162 @@
+ # coding=utf-8
+ import json
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{tran2021vivqa,
+     title={ViVQA: Vietnamese visual question answering},
+     author={Tran, Khanh Quoc and Nguyen, An Trong and Le, An Tran-Hoai and Van Nguyen, Kiet},
+     booktitle={Proceedings of the 35th Pacific Asia Conference on Language, Information and Computation},
+     pages={683--691},
+     year={2021}
+ }
+ """
+
+ _DATASETNAME = "openvivqa"
+
+ _DESCRIPTION = """\
+ OpenViVQA (Open-domain Vietnamese Visual Question Answering) is a dataset for visual question answering (VQA) with
+ open-ended answers in Vietnamese. It consists of 11,199 images associated with 37,914 question-answer pairs (QAs).
+ Images in the OpenViVQA dataset were captured in Vietnam, and the question-answer pairs were created manually by
+ Vietnamese crowd workers.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/uitnlp/OpenViVQA-dataset"
+
+ _LANGUAGES = ["vie"]
+
+ _LICENSE = Licenses.MIT.value
+
+ _LOCAL = False
+
+ _HF_URL = "https://huggingface.co/datasets/uitnlp/OpenViVQA-dataset"
+
+ _URLS = {
+     "dataset": {
+         "train": "https://huggingface.co/datasets/uitnlp/OpenViVQA-dataset/raw/main/vlsp2023_train_data.json",
+         "test": "https://huggingface.co/datasets/uitnlp/OpenViVQA-dataset/raw/main/vlsp2023_test_data.json",
+         "dev": "https://huggingface.co/datasets/uitnlp/OpenViVQA-dataset/raw/main/vlsp2023_dev_data.json",
+     },
+     "images": {
+         "train": "https://huggingface.co/datasets/uitnlp/OpenViVQA-dataset/resolve/main/train-images.zip?download=true",
+         "test": "https://huggingface.co/datasets/uitnlp/OpenViVQA-dataset/resolve/main/test-images.zip?download=true",
+         "dev": "https://huggingface.co/datasets/uitnlp/OpenViVQA-dataset/resolve/main/dev-images.zip?download=true",
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.VISUAL_QUESTION_ANSWERING]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class OpenViVQADataset(datasets.GeneratorBasedBuilder):
+     """OpenViVQA: open-domain visual question answering with open-ended Vietnamese answers."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_imqa",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema="seacrowd_imqa",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "img_path": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answer": datasets.Value("string"),
+                     "id": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "seacrowd_imqa":
+             features = schemas.imqa_features
+             # features["meta"] = {"image_path": datasets.Value("string")}
+         else:
+             raise ValueError(f"No schema matched for {self.config.schema}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # Annotation JSON files and image archives are downloaded separately.
+         data_dir = dl_manager.download_and_extract(_URLS["dataset"])
+         image_dir = dl_manager.download_and_extract(_URLS["images"])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir["train"],
+                     "imagepath": os.path.join(image_dir["train"], "training-images"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir["test"],
+                     "imagepath": os.path.join(image_dir["test"], "test-images"),
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_dir["dev"],
+                     "imagepath": os.path.join(image_dir["dev"], "dev-images"),
+                     "split": "validation",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Path, imagepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         with open(filepath, "r", encoding="utf-8") as f:
+             raw_examples = json.load(f)
+         # "images" maps image ids to file names; "annotations" maps question ids
+         # to records containing "image_id", "question", and "answer".
+         images = raw_examples["images"]
+         data_annotations = raw_examples["annotations"]
+         for sample_id, q_key in enumerate(data_annotations.keys()):
+             sample = data_annotations[q_key]
+             sample_img_id = sample["image_id"]
+             sample_img_name = images[str(sample_img_id)]
+             sample_img_path = os.path.join(imagepath, sample_img_name)
+             sample_question = sample["question"]
+             sample_answer = sample["answer"]
+             if self.config.schema == "source":
+                 example = {
+                     "img_path": sample_img_path,
+                     "question": sample_question,
+                     "answer": sample_answer,
+                     "id": q_key,
+                 }
+             elif self.config.schema == "seacrowd_imqa":
+                 example = {
+                     "id": q_key,
+                     "question_id": q_key,
+                     "document_id": q_key,
+                     "questions": [sample_question],
+                     "type": None,
+                     "choices": None,
+                     "context": sample_img_id,
+                     "answer": [sample_answer],
+                     "image_paths": [sample_img_path],
+                     "meta": {},
+                 }
+             yield sample_id, example
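
For reference, _generate_examples expects each annotation file (e.g. vlsp2023_train_data.json) to contain an "images" map from image id to file name and an "annotations" map from question id to a record with "image_id", "question", and "answer". A hypothetical sketch of that layout, with placeholder ids, file names, and text:

# Hypothetical illustration of the annotation layout consumed by the loader above;
# only the keys mirror the code, the values are made-up placeholders.
example_annotation_file = {
    "images": {
        "1": "<image file name>.jpg",
    },
    "annotations": {
        "101": {
            "image_id": 1,
            "question": "<Vietnamese question>",
            "answer": "<Vietnamese answer>",
        },
    },
}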
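
A minimal loading sketch, assuming the seacrowd package is installed and this script is saved locally as openvivqa.py; the config names follow BUILDER_CONFIGS above:

import datasets

# Source schema; pass name="openvivqa_seacrowd_imqa" for the SEACrowd imqa schema instead.
dataset = datasets.load_dataset(
    "openvivqa.py",
    name="openvivqa_source",
    trust_remote_code=True,
)
print(dataset["train"][0]["question"], "->", dataset["train"][0]["answer"])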