diego
committed on
Commit
·
4fdd820
1
Parent(s):
4cc81a3
Fixed nonexistent class; removed image loading before loading dataset
Browse files- ExpirationDate.py +35 -39
ExpirationDate.py
CHANGED
@@ -1,7 +1,5 @@
|
|
1 |
import json
|
2 |
import os
|
3 |
-
# import base64
|
4 |
-
from PIL import Image
|
5 |
import datasets
|
6 |
|
7 |
# Find for instance the citation on arxiv or on the dataset repo/website
|
@@ -18,15 +16,14 @@ _HOMEPAGE = "https://acseker.github.io/ExpDateWebsite/"
|
|
18 |
_LICENSE = "https://licenses.nuget.org/AFL-3.0"
|
19 |
|
20 |
_URLs = {
|
21 |
-
"products_synth": "https://huggingface.co/datasets/dimun/ExpirationDate/
|
22 |
-
"products_real": "https://huggingface.co/datasets/dimun/ExpirationDate/
|
23 |
}
|
24 |
|
25 |
|
26 |
-
def
|
27 |
-
|
28 |
-
|
29 |
-
return image, (w, h)
|
30 |
|
31 |
|
32 |
logger = datasets.logging.get_logger(__name__)
|
@@ -34,6 +31,7 @@ logger = datasets.logging.get_logger(__name__)
|
|
34 |
|
35 |
class ExpirationDate(datasets.GeneratorBasedBuilder):
|
36 |
VERSION = datasets.Version("0.0.1")
|
|
|
37 |
|
38 |
def _info(self):
|
39 |
features = datasets.Features(
|
@@ -41,11 +39,8 @@ class ExpirationDate(datasets.GeneratorBasedBuilder):
|
|
41 |
"id": datasets.Value("string"),
|
42 |
"transcriptions": datasets.Sequence(datasets.Value("string")),
|
43 |
"bboxes_block": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
|
44 |
-
"categories": datasets.Sequence(
|
45 |
-
|
46 |
-
names=["prod", "date", "due", "code"])
|
47 |
-
),
|
48 |
-
"image": datasets.features.Image(),
|
49 |
"width": datasets.Value("int32"),
|
50 |
"height": datasets.Value("int32")
|
51 |
}
|
@@ -68,9 +63,9 @@ class ExpirationDate(datasets.GeneratorBasedBuilder):
|
|
68 |
"""Returns SplitGenerators."""
|
69 |
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract files
|
70 |
# based on the provided URLs
|
71 |
-
|
72 |
archive_path = dl_manager.download_and_extract(_URLs)
|
73 |
-
|
74 |
return [
|
75 |
datasets.SplitGenerator(
|
76 |
name=datasets.Split.TRAIN,
|
@@ -100,36 +95,37 @@ class ExpirationDate(datasets.GeneratorBasedBuilder):
|
|
100 |
]
|
101 |
|
102 |
def _generate_examples(self, filepath, split):
|
103 |
-
logger.info(
|
|
|
104 |
ann_file = os.path.join(filepath, split, "annotations.json")
|
105 |
-
|
106 |
# get json
|
107 |
with open(ann_file, "r", encoding="utf8") as f:
|
108 |
features_map = json.load(f)
|
109 |
-
|
110 |
img_dir = os.path.join(filepath, split, "images")
|
111 |
img_listdir = os.listdir(img_dir)
|
112 |
|
113 |
for guid, filename in enumerate(img_listdir):
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
}
|
|
|
1 |
import json
|
2 |
import os
|
|
|
|
|
3 |
import datasets
|
4 |
|
5 |
# Find for instance the citation on arxiv or on the dataset repo/website
|
|
|
16 |
_LICENSE = "https://licenses.nuget.org/AFL-3.0"
|
17 |
|
18 |
# Download URLs for the two dataset archives hosted on the Hugging Face Hub:
# synthetically rendered product images and real product photographs.
_URLs = {
    "products_synth": "https://huggingface.co/datasets/dimun/ExpirationDate/resolve/main/Products-Synth.zip?download=true",
    "products_real": "https://huggingface.co/datasets/dimun/ExpirationDate/resolve/main/Products-Real.zip?download=true",
}
|
22 |
|
23 |
|
24 |
+
def has_extension(file_path, extensions):
    """Return True if *file_path*'s extension (case-insensitive) is one of *extensions*.

    Extensions are compared including their leading dot, e.g. ".jpg".
    """
    extension = os.path.splitext(file_path)[1]
    return extension.lower() in extensions
|
|
|
27 |
|
28 |
|
29 |
logger = datasets.logging.get_logger(__name__)
|
|
|
31 |
|
32 |
class ExpirationDate(datasets.GeneratorBasedBuilder):
|
33 |
VERSION = datasets.Version("0.0.1")
|
34 |
+
CATEGORIES = ["prod", "date", "due", "code"]
|
35 |
|
36 |
def _info(self):
|
37 |
features = datasets.Features(
|
|
|
39 |
"id": datasets.Value("string"),
|
40 |
"transcriptions": datasets.Sequence(datasets.Value("string")),
|
41 |
"bboxes_block": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
|
42 |
+
"categories": datasets.Sequence(datasets.features.ClassLabel(names=self.CATEGORIES)),
|
43 |
+
"image_path": datasets.Value("string"),
|
|
|
|
|
|
|
44 |
"width": datasets.Value("int32"),
|
45 |
"height": datasets.Value("int32")
|
46 |
}
|
|
|
63 |
"""Returns SplitGenerators."""
|
64 |
# dl_manager is a datasets.download.DownloadManager that can be used to download and extract files
|
65 |
# based on the provided URLs
|
66 |
+
|
67 |
archive_path = dl_manager.download_and_extract(_URLs)
|
68 |
+
|
69 |
return [
|
70 |
datasets.SplitGenerator(
|
71 |
name=datasets.Split.TRAIN,
|
|
|
95 |
]
|
96 |
|
97 |
def _generate_examples(self, filepath, split):
    """Yield (key, example) pairs for *split* from the extracted archive at *filepath*.

    Reads ``<filepath>/<split>/annotations.json`` and pairs each ``.jpg`` file in
    ``<filepath>/<split>/images`` with its annotations. Any box whose ``cls`` is
    not a known category falls back to the label ``"date"``.
    """
    logger.info(
        f"⏳ Generating examples from = {filepath} to the split {split}")

    annotations_path = os.path.join(filepath, split, "annotations.json")
    with open(annotations_path, "r", encoding="utf8") as ann_fp:
        features_map = json.load(ann_fp)

    images_dir = os.path.join(filepath, split, "images")

    for key, image_name in enumerate(os.listdir(images_dir)):
        # Non-image entries are skipped but still consume an enumerate index,
        # matching the original keying behavior.
        if not image_name.endswith(".jpg"):
            continue

        image_features = features_map[image_name]
        annotations = image_features.get("ann")

        transcriptions = [box.get("transcription", "") for box in annotations]
        boxes = [box.get("bbox") for box in annotations]
        labels = []
        for box in annotations:
            cls = box.get("cls")
            labels.append(cls if cls in self.CATEGORIES else "date")

        yield key, {
            "id": image_name,
            "transcriptions": transcriptions,
            "bboxes_block": boxes,
            "categories": labels,
            "image_path": os.path.join(images_dir, image_name),
            "width": image_features.get("width"),
            "height": image_features.get("height"),
        }
|