Datasets:
nkjp-ner

Modalities:
Text
Formats:
parquet
Languages:
Polish
Libraries:
Datasets
pandas
License:
gpl-3.0

albertvillanova (HF staff) committed
Commit: 3318f30
1 Parent(s): ffbcf8b

Convert dataset to Parquet (#10)

- Convert dataset to Parquet (22765328d0eeb93f6d49b560b38b24e1e030fdda)
- Delete loading script (efacde1b81bc3b87a0d8943b0b19ee32b43616be)
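
With the splits served as Parquet, the repository no longer needs a loading script; `datasets` reads the shards directly. A minimal sketch of loading after this commit, assuming the canonical hub id is `nkjp-ner` (taken from the deleted script's filename):

```python
from datasets import load_dataset

# Splits resolve straight from the Parquet shards declared in README.md;
# no dataset script is downloaded or executed.
ds = load_dataset("nkjp-ner")

print(ds)              # DatasetDict with train / test / validation
print(ds["train"][0])  # {'sentence': ..., 'target': ...}
```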

README.md CHANGED
@@ -34,16 +34,25 @@ dataset_info:
           '5': time
   splits:
   - name: train
-    num_bytes: 1612125
+    num_bytes: 1612117
     num_examples: 15794
   - name: test
-    num_bytes: 221092
+    num_bytes: 221088
     num_examples: 2058
   - name: validation
-    num_bytes: 196652
+    num_bytes: 196648
     num_examples: 1941
-  download_size: 821629
-  dataset_size: 2029869
+  download_size: 1447759
+  dataset_size: 2029853
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
+  - split: validation
+    path: data/validation-*
 ---
 
 # Dataset Card for NJKP NER
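
The new `configs` block is what takes over from the deleted script's `_split_generators`: it maps each split name to a file glob under `data/`. The same mapping can be written out explicitly with the generic Parquet builder; a sketch, assuming it is run from a local clone of the repository so the globs resolve:

```python
from datasets import load_dataset

# Explicit equivalent of the YAML `configs` mapping above: the generic
# "parquet" builder plus the same split-to-glob assignments.
ds = load_dataset(
    "parquet",
    data_files={
        "train": "data/train-*",
        "test": "data/test-*",
        "validation": "data/validation-*",
    },
)
```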
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a18ea62a046e214c3280d3051fa775525ea47b4978240aecf43370b13a8e116
+size 157416

data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44e6efd683033697df43b4295e1610e892c80a6141ed0e57cac41514dfb273d0
+size 1150625

data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d3626a7be86969974c3991dc349a5f5534b93171ec03f4c6a7d12e1c12f1a45
+size 139718
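
What the diff records are Git LFS pointer files; the Parquet bytes themselves live in LFS storage (note the three `size` fields sum to the new `download_size` of 1447759). Individual shards can also be read with pandas; a sketch, where the URL is an assumption based on the hub's usual resolve-endpoint layout and the `nkjp-ner` repo id:

```python
import pandas as pd

# Read one shard directly; pandas needs pyarrow or fastparquet installed.
# Hypothetical URL, assuming the hub's standard resolve layout:
url = (
    "https://huggingface.co/datasets/nkjp-ner/resolve/main/"
    "data/test-00000-of-00001.parquet"
)
df = pd.read_parquet(url)
print(df.shape)  # expect (2058, 2), matching the test split in README.md
```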
nkjp-ner.py DELETED
@@ -1,107 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""NKJP-NER"""
-
-
-import csv
-import os
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_CITATION = """\
-@book{przepiorkowski2012narodowy,
-  title={Narodowy korpus jezyka polskiego},
-  author={Przepi{\'o}rkowski, Adam},
-  year={2012},
-  publisher={Naukowe PWN}
-}
-"""
-
-_DESCRIPTION = """\
-The NKJP-NER is based on a human-annotated part of National Corpus of Polish (NKJP). We extracted sentences with named entities of exactly one type. The task is to predict the type of the named entity.
-"""
-
-_HOMEPAGE = "https://klejbenchmark.com/tasks/"
-
-_LICENSE = "GNU GPL v.3"
-
-_URLs = "https://klejbenchmark.com/static/data/klej_nkjp-ner.zip"
-
-
-class NkjpNer(datasets.GeneratorBasedBuilder):
-    """NKJP-NER"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "target": datasets.ClassLabel(
-                        names=[
-                            "geogName",
-                            "noEntity",
-                            "orgName",
-                            "persName",
-                            "placeName",
-                            "time",
-                        ]
-                    ),
-                }
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="sentence", label_column="target")],
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URLs)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.tsv"),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"filepath": os.path.join(data_dir, "test_features.tsv"), "split": "test"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.tsv"),
-                    "split": "dev",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-        with open(filepath, encoding="utf-8") as f:
-            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-            for id_, row in enumerate(reader):
-                yield id_, {
-                    "sentence": row["sentence"],
-                    "target": -1 if split == "test" else row["target"],
-                }
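
The deleted script downloaded the KLEJ TSVs and yielded rows on the fly; this commit replaces that with pre-built Parquet shards. A rough sketch of an equivalent conversion step, not the hub's actual tooling: it keeps `target` as raw strings rather than `ClassLabel` ids and assumes the KLEJ zip has already been downloaded and extracted locally:

```python
import csv

import pandas as pd

# Mirror the deleted script's file layout: one TSV per split.
splits = {
    "train": "train.tsv",
    "test": "test_features.tsv",  # labels withheld upstream
    "validation": "dev.tsv",
}

for split, tsv in splits.items():
    df = pd.read_csv(tsv, sep="\t", quoting=csv.QUOTE_NONE)
    if split == "test":
        df["target"] = -1  # same placeholder _generate_examples used
    df.to_parquet(f"data/{split}-00000-of-00001.parquet", index=False)
```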