Datasets: pierreguillou/DocLayNet-large

Commit 4d0cb9d · Update DocLayNet-large.py
Parent(s): 048e3df

Files changed: DocLayNet-large.py (+23 -27)
@@ -72,17 +72,17 @@ def load_image(image_path):
 logger = datasets.logging.get_logger(__name__)
 
 
-class DocLayNetConfig(datasets.BuilderConfig):
-    """BuilderConfig for DocLayNet"""
+class DocLayNetBuilderConfig(datasets.BuilderConfig):
+    """BuilderConfig for DocLayNet base"""
 
-    def __init__(self, **kwargs):
+    def __init__(self, name, **kwargs):
         """BuilderConfig for DocLayNet large.
         Args:
             **kwargs: keyword arguments forwarded to super.
         """
-        super(DocLayNetConfig, self).__init__(**kwargs)
+        super().__init__(name, **kwargs)
 
 
 class DocLayNet(datasets.GeneratorBasedBuilder):
     """
     DocLayNet large is about 99% of the DocLayNet dataset (more information at https://huggingface.co/datasets/pierreguillou/DocLayNet-large)
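The config class is renamed, name becomes an explicit argument, and the Python 2 style super(DocLayNetConfig, self).__init__(...) call is modernized to a bare super(). A minimal standalone sketch of this BuilderConfig pattern (hypothetical class and config names, not part of the commit):

import datasets

class MyDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig that only forwards its arguments to the base class."""

    def __init__(self, name, **kwargs):
        # name, version and description are all stored by
        # datasets.BuilderConfig itself; no extra state is kept here.
        super().__init__(name, **kwargs)

config = MyDatasetConfig(name="default", version=datasets.Version("1.0.0"))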
@@ -105,12 +105,14 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
     # You will be able to load one or the other configurations in the following list with
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
+    DEFAULT_CONFIG_NAME = "DocLayNet_2022.08_processed_on_2023.01"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
     BUILDER_CONFIGS = [
-        DocLayNetConfig(name=
+        DocLayNetConfig(name=DEFAULT_CONFIG_NAME, version=VERSION, description="DocLayNet large dataset"),
     ]
 
-
-
+    BUILDER_CONFIG_CLASS = DocLayNetBuilderConfig
+
     def _info(self):
 
         features = datasets.Features(
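Two class attributes are added: DEFAULT_CONFIG_NAME, so the dataset can be loaded without naming a configuration, and BUILDER_CONFIG_CLASS, which tells the builder to instantiate DocLayNetBuilderConfig. Note that the added BUILDER_CONFIGS entry still calls DocLayNetConfig; unless that name is aliased elsewhere in the file, it would raise a NameError at import time and presumably should read DocLayNetBuilderConfig. A usage sketch, using the hub id cited in the class docstring:

from datasets import load_dataset

# With DEFAULT_CONFIG_NAME defined, both calls resolve to the same config:
ds = load_dataset("pierreguillou/DocLayNet-large")
ds = load_dataset("pierreguillou/DocLayNet-large",
                  "DocLayNet_2022.08_processed_on_2023.01")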
@@ -164,10 +166,9 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
 
         archive_path = dl_manager.download_and_extract(_URLs)
-
-
-
-        dataset = datasets.SplitGenerator(
+
+        return [
+            datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
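dl_manager.download_and_extract mirrors the structure of its argument: given the _URLs dict, it returns a dict with the same keys whose values are local paths to the extracted archives, which is why archive_path can be indexed by part name in the gen_kwargs below. An illustration with made-up cache paths:

import os

# Shape of the value returned by download_and_extract (illustrative paths):
archive_path = {
    "part_dataset_1": "/cache/extracted/abc123",
    "part_dataset_2": "/cache/extracted/def456",
    "part_dataset_3": "/cache/extracted/0a1b2c",
}

# The per-split directories are then joined exactly as in the script:
train_dir_1 = os.path.join(archive_path["part_dataset_1"], "part_dataset_1/train/")
print(train_dir_1)  # /cache/extracted/abc123/part_dataset_1/train/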
@@ -175,11 +176,10 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
                     "filepath_1": os.path.join(archive_path["part_dataset_1"], "part_dataset_1/train/"),
                     "filepath_2": os.path.join(archive_path["part_dataset_2"], "part_dataset_2/train/"),
                     "filepath_3": os.path.join(archive_path["part_dataset_3"], "part_dataset_3/train/"),
-                    "split_key": "train",
+                    # "split_key": "train",
                 },
-        )
-
-        dataset = datasets.SplitGenerator(
+            ),
+            datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
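The "split_key" entries are commented out in this hunk and the two below because the keys of gen_kwargs must match the parameters of _generate_examples exactly (see the signature change in the last hunk); an extra key would make the builder call the generator with an unexpected keyword argument. A minimal illustration of that contract, with hypothetical names:

def generate(filepath_0, filepath_1):
    yield filepath_0, filepath_1

gen_kwargs = {"filepath_0": "a", "filepath_1": "b"}
print(next(generate(**gen_kwargs)))  # works: keys match the parameters

gen_kwargs["split_key"] = "train"
# generate(**gen_kwargs) would now raise:
# TypeError: generate() got an unexpected keyword argument 'split_key'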
@@ -187,11 +187,10 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
                     "filepath_1": os.path.join(archive_path["part_dataset_1"], "part_dataset_1/val/"),
                     "filepath_2": os.path.join(archive_path["part_dataset_2"], "part_dataset_2/val/"),
                     "filepath_3": os.path.join(archive_path["part_dataset_3"], "part_dataset_3/val/"),
-                    "split_key": "validation",
+                    # "split_key": "validation",
                 },
-        )
-
-        dataset = datasets.SplitGenerator(
+            ),
+            datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
@@ -199,16 +198,13 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
                     "filepath_1": os.path.join(archive_path["part_dataset_1"], "part_dataset_1/test/"),
                     "filepath_2": os.path.join(archive_path["part_dataset_2"], "part_dataset_2/test/"),
                     "filepath_3": os.path.join(archive_path["part_dataset_3"], "part_dataset_3/test/"),
-                    "split_key": "test"
+                    # "split_key": "test"
                 },
-        )
-
-        continue
+            ),
+        ]
 
-        splits.append(dataset)
-        return splits
 
-    def _generate_examples(self, filepath_0, filepath_1, filepath_2, filepath_3, split_key):
+    def _generate_examples(self, filepath_0, filepath_1, filepath_2, filepath_3):
         filepath = (filepath_0, filepath_1, filepath_2, filepath_3)
         logger.info("⏳ Generating examples from = %s", filepath)
         ann_dirs = [os.path.join(filepath_0, "annotations"), os.path.join(filepath_1, "annotations"), os.path.join(filepath_2, "annotations"), os.path.join(filepath_3, "annotations")]
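Returning the list of SplitGenerator objects directly replaces the old stepwise construction (dataset = ..., splits.append(dataset), return splits, plus a stray continue, which is a syntax error outside a loop), and _generate_examples drops the now-unused split_key parameter. The method then collects the four annotations directories; a hypothetical sketch of how such a generator body typically continues (assumed layout of one JSON annotation file per page; the real implementation follows in the unchanged remainder of the file):

import json
import os

def iter_annotation_examples(ann_dirs):
    guid = 0
    for ann_dir in ann_dirs:
        for fname in sorted(os.listdir(ann_dir)):
            with open(os.path.join(ann_dir, fname), encoding="utf8") as f:
                data = json.load(f)
            # The real script maps `data` onto the Features declared in _info().
            yield guid, data
            guid += 1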
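After this commit the script should build a standard DatasetDict with the three declared splits. A quick smoke test, assuming the hub id from the docstring:

from datasets import load_dataset

ds = load_dataset("pierreguillou/DocLayNet-large")
print(ds)                    # DatasetDict with train / validation / test splits
print(ds["train"].features)  # the Features declared in _info()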