upload summarization data

- .gitattributes +2 -0
- book-summarization.py +74 -0
- train_data.json +3 -0
- validation_data.json +3 -0
.gitattributes CHANGED
@@ -25,3 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+train_data.json filter=lfs diff=lfs merge=lfs -text
+validation_data.json filter=lfs diff=lfs merge=lfs -text
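The two added patterns route the new JSON files through Git LFS, so the repository stores lightweight pointers instead of the ~457 MB and ~56 MB payloads. For context, a minimal upload sketch using the huggingface_hub client — an assumption about how such a commit can be produced, not a record of this one; the Hub applies the LFS rules from .gitattributes on its side:

# Sketch only: assumes huggingface_hub is installed and the caller is
# authenticated (e.g. via `huggingface-cli login`).
from huggingface_hub import HfApi

api = HfApi()
for fname in ["train_data.json", "validation_data.json"]:
    api.upload_file(
        path_or_fileobj=fname,                  # local JSON file to push
        path_in_repo=fname,                     # same name at the repo root
        repo_id="LeverageX/book-summarization",
        repo_type="dataset",                    # dataset repo, not a model
    )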
book-summarization.py ADDED
@@ -0,0 +1,74 @@
+
+import json
+import pandas as pd
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+
+_DESCRIPTION = """\
+Korean Book Summarization Data
+"""
+
+_URL = "https://huggingface.co/datasets/LeverageX/book-summarization/resolve/main/"
+_URLS = {
+    "train_data": _URL + "train_data.json",
+    "validation_data": _URL + "validation_data.json",
+}
+
+class KoreanNewspaper(datasets.GeneratorBasedBuilder):
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="Aihub Book Summarization",
+            version=datasets.Version("1.0.0", ""),
+            description="Korean Summarization Data",
+        ),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "name": datasets.Value("string"),
+                    "publisher": datasets.Value("string"),
+                    "passage": datasets.Value("string"),
+                    "summary": datasets.Value("string"),
+                }
+            ),
+            # No default supervised_keys (passage and summary are both
+            # exposed as plain features instead).
+            supervised_keys=None,
+            homepage="https://aihub.or.kr/aidata/30713",
+        )
+
+    def _split_generators(self, dl_manager):
+        downloaded_files = dl_manager.download_and_extract(_URLS)
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train_data"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation_data"]}),
+        ]
+
+    def _generate_examples(self, filepath):
+        """This function returns the examples in the raw (text) form."""
+        logger.info("generating examples from = %s", filepath)
+        key = 0
+        with open(filepath, encoding="utf-8") as f:
+            data = json.load(f)
+
+        for info in data:
+            doc_id = info['id']
+            doc_name = info['name']
+            publisher = info['publisher']
+            passage = info['passage']
+            summary = info['summary']
+
+            yield key, {
+                "id": doc_id,
+                "name": doc_name,
+                "publisher": publisher,
+                "passage": passage,
+                "summary": summary,
+            }
+            key += 1
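With the loading script and LFS data in place, the dataset can be pulled straight from the Hub. A hedged usage sketch — the repo id is taken from _URL above; newer datasets releases may require trust_remote_code=True for script-backed datasets, and the most recent ones have dropped loading-script support entirely:

# Usage sketch: assumes a datasets version that still runs loading scripts.
from datasets import load_dataset

ds = load_dataset("LeverageX/book-summarization")
print(ds["train"][0]["summary"])  # fields mirror the Features block above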
train_data.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b52e380c110d3d9cf3c0d9336bee3c876d7d19e47ff7a1821c88ed89e2160ccb
+size 456828010
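A Git LFS pointer is just these three lines: the spec version, the SHA-256 of the real file's contents, and its size in bytes. A small sketch to verify a downloaded file against its pointer, using only the standard library:

import hashlib

def lfs_oid(path: str) -> str:
    # Stream the file in 1 MiB chunks and return its SHA-256 hex digest,
    # which should equal the oid recorded in the LFS pointer.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

assert lfs_oid("train_data.json") == "b52e380c110d3d9cf3c0d9336bee3c876d7d19e47ff7a1821c88ed89e2160ccb"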
validation_data.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca1a64ca2593b9d8ec4a0cd950737a645ac3d954ba8f59097cc6e92c82c09901
+size 56153159
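For reference, _generate_examples expects each of these LFS-backed JSON files to decode to a list of objects carrying exactly the five feature keys. A hypothetical record, inferred from the loader; the values are invented for illustration:

# Hypothetical example record; real entries are Korean book passages.
record = {
    "id": "doc-00001",               # document identifier
    "name": "Example Book Title",    # book title
    "publisher": "Example Press",    # publisher name
    "passage": "Full source passage to be summarized.",
    "summary": "Short reference summary of the passage.",
}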