change script
lccc.py (CHANGED)
@@ -13,9 +13,9 @@
 # limitations under the License.
 """
 LCCC: Large-scale Cleaned Chinese Conversation corpus (LCCC) is a large corpus of Chinese conversations.
-A rigorous data cleaning pipeline is designed to ensure the quality of the corpus.
-This pipeline involves a set of rules and several classifier-based filters.
-Noises such as offensive or sensitive words, special symbols, emojis,
+A rigorous data cleaning pipeline is designed to ensure the quality of the corpus.
+This pipeline involves a set of rules and several classifier-based filters.
+Noises such as offensive or sensitive words, special symbols, emojis,
 grammatically incorrect sentences, and incoherent conversations are filtered.
 """
@@ -39,9 +39,9 @@ url={https://arxiv.org/abs/2008.03946}
 # Description of the dataset here
 _DESCRIPTION = """\
 LCCC: Large-scale Cleaned Chinese Conversation corpus (LCCC) is a large corpus of Chinese conversations.
-A rigorous data cleaning pipeline is designed to ensure the quality of the corpus.
-This pipeline involves a set of rules and several classifier-based filters.
-Noises such as offensive or sensitive words, special symbols, emojis,
+A rigorous data cleaning pipeline is designed to ensure the quality of the corpus.
+This pipeline involves a set of rules and several classifier-based filters.
+Noises such as offensive or sensitive words, special symbols, emojis,
 grammatically incorrect sentences, and incoherent conversations are filtered.
 """
@@ -53,7 +53,7 @@ _URLS = {
         "train": "https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_base_train.jsonl.gz",
         "valid": "https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_base_valid.jsonl.gz",
         "test": "https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_base_test.jsonl.gz",
-    }
+    },
 }
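For context, the dict this hunk edits plausibly looks as sketched below; the grouping under a "base" key is an assumption (only the three URLs and the closing braces are visible in the diff), and the added trailing comma is a small stylistic fix that keeps the literal easy to extend:

# Sketch of the _URLS layout implied by the hunk header "_URLS = {";
# the nesting under "base" is assumed from the config names used later.
_URLS = {
    "base": {
        "train": "https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_base_train.jsonl.gz",
        "valid": "https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_base_valid.jsonl.gz",
        "test": "https://huggingface.co/datasets/silver/lccc/resolve/main/lccc_base_test.jsonl.gz",
    },  # trailing comma added by this change; valid Python and friendly to further entries
}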
@@ -70,7 +70,7 @@ class LCCC(datasets.GeneratorBasedBuilder):
     def _info(self):
         features = datasets.Features(
             {
-                "dialog": datasets.Value("string"),
+                "dialog": [datasets.Value("string")],
             }
         )
         return datasets.DatasetInfo(
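The feature change redefines "dialog" as a sequence of strings rather than a single string, which matches one conversation being a list of utterances. A minimal sketch of what the new schema declares and what a conforming example looks like (the utterance text is invented for illustration):

import datasets

# Wrapping a Value in a list declares a variable-length sequence,
# so every example's "dialog" field is a list of utterance strings.
features = datasets.Features({"dialog": [datasets.Value("string")]})

# A conforming example yielded by _generate_examples then looks like:
example = {"dialog": ["嗨，最近怎么样？", "挺好的，你呢？"]}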
@@ -95,23 +95,32 @@ class LCCC(datasets.GeneratorBasedBuilder):
         if self.config.name == "large":
             return [
                 datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "filepath": os.path.join(downloaded_data),
+                        "split": "train",
+                    },
                 )
             ]
         if self.config.name == "base":
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
-                    gen_kwargs={
+                    gen_kwargs={
+                        "filepath": os.path.join(downloaded_data["train"]),
+                        "split": "train",
+                    },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
-                    gen_kwargs={
+                    gen_kwargs={"filepath": os.path.join(downloaded_data["test"]), "split": "test"},
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
-                    gen_kwargs={
+                    gen_kwargs={
+                        "filepath": os.path.join(downloaded_data["valid"]),
+                        "split": "dev",
+                    },
                 ),
             ]
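These gen_kwargs hand concrete file paths to _generate_examples. The code that produces downloaded_data sits above the hunk and is not shown; in the usual GeneratorBasedBuilder pattern it would be something like the sketch below (an assumption, not the exact script). Note also that os.path.join(downloaded_data) with a single argument simply returns that path unchanged, so the wrapper is a no-op there.

def _split_generators(self, dl_manager):
    # Assumed: the URLs for the selected config are downloaded (and the .gz
    # files extracted) up front. For "base", _URLS holds a dict of URLs, so a
    # dict of local paths comes back and is indexed as downloaded_data["train"],
    # downloaded_data["valid"], downloaded_data["test"] as in the diff.
    downloaded_data = dl_manager.download_and_extract(_URLS[self.config.name])
    ...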
@@ -120,7 +129,8 @@ class LCCC(datasets.GeneratorBasedBuilder):
         with open(filepath, encoding="utf-8") as f:
             for key, row in enumerate(f):
                 row = row.strip()
-                if len(row) == 0:
+                if len(row) == 0:
+                    continue
                 yield key, {
                     "dialog": json.loads(row),
                 }
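With the new guard, blank lines in the JSONL files are skipped instead of reaching json.loads. Assembled from this hunk, the generator reads roughly as follows (the signature is inferred from the "filepath" and "split" keys passed via gen_kwargs; split is unused in the body shown):

import json

def _generate_examples(self, filepath, split):
    # Each non-empty line of the extracted JSONL file is one conversation,
    # encoded as a JSON array of utterance strings.
    with open(filepath, encoding="utf-8") as f:
        for key, row in enumerate(f):
            row = row.strip()
            if len(row) == 0:
                continue  # skip blank lines rather than crashing in json.loads
            yield key, {
                "dialog": json.loads(row),
            }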
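After this change the script should load in the usual way; the repo id comes from the URLs above and the config names from the builder, while the rest below is a standard usage sketch (recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# "base" exposes train/validation/test splits; "large" only a train split,
# per the _split_generators shown in this diff.
lccc_base = load_dataset("silver/lccc", "base")
print(lccc_base["train"][0]["dialog"])  # a list of utterance strings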