holylovenia
committed on
Upload kopi_nllb.py with huggingface_hub
Browse files- kopi_nllb.py +12 -12
kopi_nllb.py
CHANGED
@@ -18,9 +18,9 @@ import json
|
|
18 |
import datasets
|
19 |
import zstandard as zstd
|
20 |
|
21 |
-
from
|
22 |
-
from
|
23 |
-
from
|
24 |
DEFAULT_SOURCE_VIEW_NAME, Tasks)
|
25 |
|
26 |
logger = datasets.logging.get_logger(__name__)
|
@@ -62,7 +62,7 @@ _SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
|
|
62 |
|
63 |
_LANGUAGES = ["ind", "jav", "ace", "ban", "bjn", "min", "sun"]
|
64 |
|
65 |
-
|
66 |
|
67 |
_SOURCE_VERSION = "2022.09.13"
|
68 |
|
@@ -70,20 +70,20 @@ _LOCAL = False
|
|
70 |
|
71 |
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
|
72 |
|
73 |
-
_UNIFIED_VIEW_NAME =
|
74 |
|
75 |
_URL = "https://huggingface.co/datasets/allenai/nllb"
|
76 |
|
77 |
|
78 |
-
def
|
79 |
-
"""Construct
|
80 |
-
if schema != "source" and schema != "
|
81 |
raise ValueError(f"Invalid schema: {schema}")
|
82 |
|
83 |
if lang == "":
|
84 |
raise ValueError(f"Snapshot is required. Choose one of these Snapshot: {_ALL_CONFIG}.")
|
85 |
elif lang in _ALL_CONFIG:
|
86 |
-
return
|
87 |
name=f"{_DATASETNAME}_{lang}_{schema}",
|
88 |
version=datasets.Version(version),
|
89 |
description=f"KoPI-NLLB with {schema} schema for {lang}",
|
@@ -108,7 +108,7 @@ class KoPINLLBConfig(datasets.BuilderConfig):
|
|
108 |
class KoPINLLB(datasets.GeneratorBasedBuilder):
|
109 |
"""KoPI NLLB corpus."""
|
110 |
|
111 |
-
BUILDER_CONFIGS = [
|
112 |
|
113 |
def _info(self):
|
114 |
|
@@ -121,7 +121,7 @@ class KoPINLLB(datasets.GeneratorBasedBuilder):
|
|
121 |
"source": datasets.Value("string"),
|
122 |
}
|
123 |
)
|
124 |
-
elif self.config.schema == "
|
125 |
features = schemas.self_supervised_pretraining.features
|
126 |
return datasets.DatasetInfo(
|
127 |
description=_DESCRIPTION,
|
@@ -151,7 +151,7 @@ class KoPINLLB(datasets.GeneratorBasedBuilder):
|
|
151 |
for line in f:
|
152 |
if line:
|
153 |
example = json.loads(line)
|
154 |
-
if self.config.schema == "
|
155 |
yield id_, {"id": str(id_), "text": example["text"]}
|
156 |
id_ += 1
|
157 |
else:
|
|
|
18 |
import datasets
|
19 |
import zstandard as zstd
|
20 |
|
21 |
+
from seacrowd.utils import schemas
|
22 |
+
from seacrowd.utils.configs import SEACrowdConfig
|
23 |
+
from seacrowd.utils.constants import (DEFAULT_SEACROWD_VIEW_NAME,
|
24 |
DEFAULT_SOURCE_VIEW_NAME, Tasks)
|
25 |
|
26 |
logger = datasets.logging.get_logger(__name__)
|
|
|
62 |
|
63 |
_LANGUAGES = ["ind", "jav", "ace", "ban", "bjn", "min", "sun"]
|
64 |
|
65 |
+
_SEACROWD_VERSION = "2024.06.20"
|
66 |
|
67 |
_SOURCE_VERSION = "2022.09.13"
|
68 |
|
|
|
70 |
|
71 |
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
|
72 |
|
73 |
+
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
|
74 |
|
75 |
_URL = "https://huggingface.co/datasets/allenai/nllb"
|
76 |
|
77 |
|
78 |
+
def seacrowd_config_constructor(lang, schema, version):
|
79 |
+
"""Construct SEACrowdConfig"""
|
80 |
+
if schema != "source" and schema != "seacrowd_ssp":
|
81 |
raise ValueError(f"Invalid schema: {schema}")
|
82 |
|
83 |
if lang == "":
|
84 |
raise ValueError(f"Snapshot is required. Choose one of these Snapshot: {_ALL_CONFIG}.")
|
85 |
elif lang in _ALL_CONFIG:
|
86 |
+
return SEACrowdConfig(
|
87 |
name=f"{_DATASETNAME}_{lang}_{schema}",
|
88 |
version=datasets.Version(version),
|
89 |
description=f"KoPI-NLLB with {schema} schema for {lang}",
|
|
|
108 |
class KoPINLLB(datasets.GeneratorBasedBuilder):
|
109 |
"""KoPI NLLB corpus."""
|
110 |
|
111 |
+
BUILDER_CONFIGS = [seacrowd_config_constructor(sn, "source", _SOURCE_VERSION) for sn in _ALL_CONFIG] + [seacrowd_config_constructor(sn, "seacrowd_ssp", _SEACROWD_VERSION) for sn in _ALL_CONFIG]
|
112 |
|
113 |
def _info(self):
|
114 |
|
|
|
121 |
"source": datasets.Value("string"),
|
122 |
}
|
123 |
)
|
124 |
+
elif self.config.schema == "seacrowd_ssp":
|
125 |
features = schemas.self_supervised_pretraining.features
|
126 |
return datasets.DatasetInfo(
|
127 |
description=_DESCRIPTION,
|
|
|
151 |
for line in f:
|
152 |
if line:
|
153 |
example = json.loads(line)
|
154 |
+
if self.config.schema == "seacrowd_ssp":
|
155 |
yield id_, {"id": str(id_), "text": example["text"]}
|
156 |
id_ += 1
|
157 |
else:
|