parquet-converter committed
Commit 5b00cd8
1 Parent(s): c8b2dae

Update parquet files

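With the data now shipped as parquet shards grouped by config directory (books/, bad-lines/, clean-text/), the dataset can be loaded without the custom script. A minimal loading sketch, assuming the Hub dataset id is "shjwudp/shu" (matching the GitHub project) and that the converted configs keep these directory names:

```python
# Minimal sketch, assuming the dataset id "shjwudp/shu" and config names
# matching the directories introduced in this commit (books, bad-lines,
# clean-text). Adjust the id/config if they differ.
from datasets import load_dataset

books = load_dataset("shjwudp/shu", "books", split="train")
print(books)              # expected columns: "name", "text"
print(books[0]["name"])   # title of the first book
```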
README.md DELETED
@@ -1,8 +0,0 @@
----
-language: zh
-license: cc-by-4.0
----
-
-A total of 15,686 Chinese books have been collected for academic research and industrial use. Books are being added continuously; to contribute, please visit the [code repository](https://github.com/shjwudp/shu).
-
-The dataset is constructed from Chinese books. Books are being collected continuously. Please visit the [code repository](https://github.com/shjwudp/shu) to contribute.
clean_text.jsonl.gz → bad-lines/shu-train-00000-of-00006.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fb2173bfa3c45577038525765c03cb647d303c41ae8f742212f69ecec016c63a
-size 898214366
+oid sha256:f26f1b83cfbf914682090206d4c8c5db9d626a0a55792e1d3cd067f14c64d2be
+size 385536940
bad_lines.jsonl.gz → bad-lines/shu-train-00001-of-00006.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16a0320c849abf6fad28867b71bdf592b7d812cecf2c54b20ed448c74c7acb80
-size 1397033113
+oid sha256:b516bb36b06192b4bf4b118c3d7e0327a0d7e5db360edee6d3f8979f84328688
+size 434048283
books.jsonl.gz → bad-lines/shu-train-00002-of-00006.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bc4b739971c7efc53cf3b9414a094aa4ec075443a81966ca4356267f64f66aba
-size 2302537322
+oid sha256:0fc41b53bc3231cb8cf87cf7079595406081949ef3e2134fcf83b26baaf1e6a4
+size 450743379
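These renames (and the additions that follow) touch only Git LFS pointer files, so each diff is just the three pointer fields: spec version, sha256 oid, and payload size in bytes; the actual parquet bytes live in LFS storage. An illustrative sketch, not part of the repository, that parses such a pointer:

```python
# Illustrative only: parse a Git LFS pointer (version / oid / size)
# like the ones shown in the diffs above.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:f26f1b83cfbf914682090206d4c8c5db9d626a0a55792e1d3cd067f14c64d2be\n"
    "size 385536940\n"
)
info = parse_lfs_pointer(pointer)
print(info["oid"], int(info["size"]))  # payload digest and size in bytes
```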
bad-lines/shu-train-00003-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d653f3e0d83291b189b9f4acb5d7a419720ddc3d43974611b64a12f98282490
+size 392342521
bad-lines/shu-train-00004-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee4e3af5504604c0c7c8af8771a7f3abfa913e138bd49fb8a8dd4beae3060175
+size 391691765
bad-lines/shu-train-00005-of-00006.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85a1ea70e314445b34538b6d7acf34afb99c27c3d2bdf8ab722097892d9f2435
+size 101463837
books/shu-train-00000-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b92e5e628cd0f0d21d80e603824744272ef1b75fa2638edb24fedc8153da2c1
+size 460234895
books/shu-train-00001-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2930b1e9c331827c97eec24770e7bcc9edfb30f5d7af656b58f8e03bf2944dd6
+size 445395244
books/shu-train-00002-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddefc736c445e8fdb15ce2e13bbbbc7a6d3decf0b0e230582c982ddc73eb35a3
+size 413805317
books/shu-train-00003-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df347ecc55d804e607ca0c34c52adb54954d6e5975a55691e8226a2c18997936
+size 538375100
books/shu-train-00004-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6492ef576aa71127a23e3d6a0a6cea99b058e1b7fae365319980da8de43af31a
+size 427345913
books/shu-train-00005-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34290a78758ab8cf363f8f245867d400442bfc5ac33c0dc947efae8a55d13912
+size 413717569
books/shu-train-00006-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a1b0cedc5da14bc0a7b9a7732bdcb6f81ca6c494b801d0e8cf2c9e1b861e4a3
+size 435876441
books/shu-train-00007-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:169b885b9bd85bcd0243cc3bac0336d083fe019d793600889e8ea4f0dde0fc2b
+size 414572278
clean-text/shu-train-00000-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eccce389c83186245ba8cbb89a4e695a61fcacb3a6a94dfca40c317023b19a73
+size 329246101
clean-text/shu-train-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc35bdc5fa05c90a634e2c8cd4a27d91ade9533e792a211b0a825f8d649a121c
+size 378838888
clean-text/shu-train-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ea33e5adc7170e86dc358ff2f536e0f99d36d5f44392bc3e8e0e66ceaf36260
+size 463571102
clean-text/shu-train-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72f0f8ac0685b0bf8022fb610299edffa40d82aff78172f1ad48b67bdb13de26
+size 216229136
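Individual shards added above can also be fetched and read directly. A sketch, assuming the dataset repo id "shjwudp/shu" and that these shard paths are reachable on the Hub; the expected "name"/"text" columns come from the removed shu.py script below. Requires huggingface_hub, pandas, and pyarrow.

```python
# Sketch: download one shard added in this commit and read it with pandas.
# Repo id "shjwudp/shu" is an assumption; adjust it (and, if needed, the
# revision) to match where the converted parquet files actually live.
from huggingface_hub import hf_hub_download
import pandas as pd

path = hf_hub_download(
    repo_id="shjwudp/shu",
    filename="clean-text/shu-train-00000-of-00004.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(df.columns.tolist(), len(df))  # expected: ["name", "text"] and the row count
```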
shu.py DELETED
@@ -1,80 +0,0 @@
-import json
-import gzip
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_DESCRIPTION = """\
-shu is a chinese book dataset.
-"""
-
-_HOMEPAGE = "https://github.com/shjwudp/shu"
-
-_DATA_URLS = {
-    "books": "books.jsonl.gz",
-    "bad-lines": "bad_lines.jsonl.gz",
-    "clean-text": "clean_text.jsonl.gz",
-}
-
-
-class ShuConfig(datasets.BuilderConfig):
-    """BuilderConfig for shu."""
-
-    def __init__(self, *args, subsets, **kwargs) -> None:
-        """BuilderConfig for shu.
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(ShuConfig, self).__init__(
-            *args,
-            name="+".join(subsets),
-            **kwargs
-        )
-        self.subsets = subsets
-
-
-class Shu(datasets.GeneratorBasedBuilder):
-    """A chinese book dataset."""
-
-    VERSION = datasets.Version("0.1.0")
-
-    BUILDER_CONFIG_CLASS = ShuConfig
-    BUILDER_CONFIGS = [ShuConfig(subsets=[subset]) for subset in _DATA_URLS]
-    DEFAULT_CONFIG_NAME = "books"
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features({
-                "name": datasets.Value("string"),
-                "text": datasets.Value("string"),
-            }),
-            homepage=_HOMEPAGE,
-        )
-
-    def _split_generators(self, dl_manager):
-        data_urls = {subset: _DATA_URLS[subset] for subset in self.config.subsets}
-        archive = dl_manager.download(data_urls)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "files": {
-                        subset: archive[subset] for subset in self.config.subsets
-                    },
-                },
-            ),
-        ]
-
-    def _generate_examples(self, files):
-        key = 0
-        for subset in files:
-            filepath = files[subset]
-            for line in gzip.open(filepath, "rt", encoding="utf-8"):
-                j = json.loads(line)
-                yield key, j
-                key += 1
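The removed script streamed gzipped JSONL, one JSON object per line with "name" and "text" fields, into a single train split. A minimal standalone equivalent of its _generate_examples loop, relevant only if you still have the pre-conversion *.jsonl.gz files locally:

```python
# Stand-in for the removed _generate_examples: stream a legacy *.jsonl.gz
# file (one JSON object per line, with "name" and "text" keys).
import gzip
import json

def iter_books(filepath):
    with gzip.open(filepath, "rt", encoding="utf-8") as f:
        for key, line in enumerate(f):
            record = json.loads(line)
            yield key, record["name"], record["text"]

# Hypothetical usage with a local copy of the old archive:
# for key, name, text in iter_books("books.jsonl.gz"):
#     print(key, name, len(text))
```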