svanhvit commited on
Commit
c9d876b
1 Parent(s): 2f63824

conll tags added

Browse files
Files changed (4) hide show
  1. README.md +15 -0
  2. create_splits.py +175 -0
  3. dataset_infos.json +104 -0
  4. icelandic-ner-MIM-GOLD-NER.py +184 -0
README.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # NER for Icelandic - MIM-GOLD-NER splits
2
+
3
+ ## MIM-GOLD-NER
4
+
5
+ The original MIM-GOLD-NER data is found at http://hdl.handle.net/20.500.12537/42
6
+
7
+ This repository packages the data for use with the Datasets library from Hugging Face.
8
+
9
+ ## Old splits
10
+
11
+ *This is no longer in use.*
12
+
13
+ At the time of creation, the original data did not have train, dev and test splits. `create_splits.py` was used to create temporary splits.
14
+
15
+
create_splits.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#
# want data from all documents
# want data from all classes
#

# Source files of the MIM-GOLD-NER corpus, one per text genre/source.
# Each file is expected under data/ in two-column CoNLL format
# (token<space>tag, blank line between sentences) — see read_file below.
file_names = [
    "adjudications.txt",
    "blog.txt",
    "books.txt",
    "emails.txt",
    "fbl.txt",
    "laws.txt",
    "mbl.txt",
    "radio_tv_news.txt",
    "school_essays.txt",
    "scienceweb.txt",
    "webmedia.txt",
    "websites.txt",
    "written-to-be-spoken.txt"
]
21
+
22
def read_file(file_name):
    """Parse a two-column CoNLL-style file into sentences.

    Each non-blank line is ``token tag``; blank lines separate sentences.

    Args:
        file_name: path to the corpus file.

    Returns:
        A list of sentences, each a list of ``(token, tag)`` tuples.

    Fixes over the original version:
      * leading or consecutive blank lines no longer crash on the
        ``w, t = "".split()`` unpacking;
      * a final sentence is kept even when the file does not end with a
        blank line.
    """
    data = []
    sentence = []
    with open(file_name) as fh:
        for line in fh:
            stripped = line.strip()
            if not stripped:
                # Sentence boundary; ignore repeated/leading blank lines.
                if sentence:
                    data.append(sentence)
                    sentence = []
                continue
            w, t = stripped.split()
            sentence.append((w, t))
    # Flush a trailing sentence that is not followed by a blank line.
    if sentence:
        data.append(sentence)
    return data
34
+
35
+ from collections import defaultdict
36
def calc_stats(data):
    """Count sentences and per-label token frequencies.

    Args:
        data: list of sentences, each a list of ``(token, tag)`` tuples.

    Returns:
        A ``defaultdict(int)`` mapping each tag to its token count, plus the
        special key ``"n_sentences"`` holding the number of sentences.
    """
    counts = defaultdict(int)
    for sentence in data:
        counts["n_sentences"] += 1
        for _token, tag in sentence:
            counts[tag] += 1
    return counts
43
+
44
+
45
+ import pprint
46
def get_total_stats():
    """Aggregate calc_stats over every corpus file listed in file_names.

    Reads each file from the ``data/`` directory and sums the per-file label
    counts into one ``defaultdict(int)``.
    """
    totals = defaultdict(int)
    for name in file_names:
        file_counts = calc_stats(read_file("data/" + name))
        for key, count in file_counts.items():
            totals[key] += count
    return totals
58
+
59
import random

# Fixed seed so the per-file shuffling (and therefore the generated splits)
# is reproducible across runs.
random.seed(1)
61
+
62
def check_if_not_done(stats, total_stats, target):
    """Report whether a split still needs more sentences.

    Args:
        stats: running counts for the split (tag -> count, ``"n_sentences"``).
        total_stats: reference counts the split should reach a share of.
        target: desired fraction (e.g. 0.1 for a 10% split).

    Returns:
        True while any count in ``stats`` is below ``target`` times the
        corresponding count in ``total_stats``; False once every quota is met.
    """
    return any(stats[key] < total * target for key, total in total_stats.items())
67
+
68
def create_splits(train=0.8, test=0.1, dev=0.1):
    """Split every corpus file into train/test/dev sentence lists.

    Sentences are shuffled per file; test and dev sentences are picked
    greedily so each split reaches roughly its target share of every NE
    label (and of sentence count) within each file. All remaining
    sentences go to the training split.

    Args:
        train: nominal training fraction (implicit: whatever test/dev do
            not take — the parameter is kept for interface compatibility).
        test: target fraction for the test split.
        dev: target fraction for the dev split.

    Returns:
        ``(train_data, test_data, dev_data)`` — lists of sentences, each
        sentence a list of ``(token, tag)`` tuples.

    Fixes over the original version:
      * the bare ``except`` + ``pdb.set_trace()`` debugging hook is
        replaced by a plain assertion with the file name attached;
      * a sentence that was wanted by neither test nor dev while the test
        quota was still open is now always routed to train, so the
        "every sentence lands in exactly one split" invariant holds by
        construction (previously such sentences could be dropped, which
        is what tripped the assert);
      * the unused ``total_stats = get_total_stats()`` computation (a full
        re-read of every corpus file) is removed.
    """

    def _wanted(sent, split_stats, stats, target):
        # A sentence helps a split while some non-O label it contains — or
        # the sentence count itself — is below the target share, with a
        # slack of 5 to avoid overshooting.
        for _word, tag in sent:
            if tag != 'O' and split_stats[tag] < target * stats[tag] - 5:
                return True
        return split_stats['n_sentences'] < target * stats['n_sentences'] - 5

    def _assign(sent, bucket, split_stats):
        # Place the sentence in a split and update that split's counts.
        bucket.append(sent)
        split_stats['n_sentences'] += 1
        for _word, tag in sent:
            split_stats[tag] += 1

    train_data = []
    test_data = []
    dev_data = []

    for file_name in file_names:
        train_stats = defaultdict(int)
        test_stats = defaultdict(int)
        dev_stats = defaultdict(int)

        sentences = read_file("data/" + file_name)
        stats = calc_stats(sentences)
        random.shuffle(sentences)

        file_train = []
        file_test = []
        file_dev = []

        for sent in sentences:
            if check_if_not_done(test_stats, stats, test):
                # Test quota still open: offer the sentence to test first,
                # then to dev, otherwise train.
                if _wanted(sent, test_stats, stats, test):
                    _assign(sent, file_test, test_stats)
                elif (check_if_not_done(dev_stats, stats, dev)
                        and _wanted(sent, dev_stats, stats, dev)):
                    _assign(sent, file_dev, dev_stats)
                else:
                    _assign(sent, file_train, train_stats)
            else:
                _assign(sent, file_train, train_stats)

        # Every sentence must end up in exactly one split.
        assert len(sentences) == len(file_train) + len(file_dev) + len(file_test), file_name

        train_data += file_train
        test_data += file_test
        dev_data += file_dev

    return train_data, test_data, dev_data
140
+
141
train, test, dev = create_splits()

# Print label statistics for the full corpus and for each split so the
# balance of the splits can be eyeballed.
total_stats = get_total_stats()
print("---- total -----")
pprint.pprint(total_stats)
print("----- test ----")
test_stats = calc_stats(test)
pprint.pprint(test_stats)
print("----- dev ----")
dev_stats = calc_stats(dev)
pprint.pprint(dev_stats)
print("----- train ----")
train_stats = calc_stats(train)
pprint.pprint(train_stats)


def write_split(path, sentences):
    """Write sentences to `path` in two-column CoNLL format: one
    ``token tag`` pair per line, a blank line between sentences."""
    with open(path, "w") as outf:
        for w, t in (pair for sent in sentences for pair in sent):
            pass  # placeholder removed below
    # (see loop below)


def write_split(path, sentences):  # noqa: F811 — final definition
    """Write sentences to `path` in two-column CoNLL format: one
    ``token tag`` pair per line, a blank line between sentences."""
    with open(path, "w") as outf:
        for sent in sentences:
            for w, t in sent:
                outf.write(f"{w} {t}\n")
            outf.write("\n")


# The original triplicated write loops (and the writelines-on-a-string
# misuse) are folded into a single helper.
write_split("train.txt", train)
write_split("test.txt", test)
write_split("dev.txt", dev)
dataset_infos.json ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "mim-gold-ner": {
3
+ "description": "This Icelandic named entity (NE) corpus, MIM-GOLD-NER, is a version of the MIM-GOLD corpus tagged for NEs. Over 48 thousand NEs are tagged in this corpus of one million tokens, which can be used for training named entity recognizers for Icelandic.\nThe MIM-GOLD-NER corpus was developed at Reykjavik University in 2018–2020, funded by the Strategic Research and Development Programme for Language Technology (LT). Two LT students were in charge of the corpus annotation and of training named entity recognizers using machine learning methods.\nA semi-automatic approach was used for annotating the corpus. Lists of Icelandic person names, location names, and company names were compiled and used for extracting and classifying as many named entities as possible. Regular expressions were then used to find certain numerical entities in the corpus. After this automatic pre-processing step, the whole corpus was reviewed manually to correct any errors. \nThe Named Entity Corpus corpus is distributed with the same special user license as MIM-GOLD, which is based on the MIM license, since the texts in MIM-GOLD were sampled from the MIM corpus.",
4
+ "citation": "@misc{20.500.12537/42,\n title = {{MIM}-{GOLD}-{NER} – named entity recognition corpus},\n author = {Ing{\\'o}lfsd{\\'o}ttir, Svanhv{\\'{\\i}}t and Gu{\\dh}j{\\'o}nsson, {\\'A}smundur Alma and Loftsson, Hrafn},\nurl = {http://hdl.handle.net/20.500.12537/42},\nnote = {{CLARIN}-{IS}},\ncopyright = {Icelandic Gigaword Corpus Part1},\nyear = {2020} }\n",
5
+ "homepage": "http://hdl.handle.net/20.500.12537/42",
6
+ "license": "",
7
+ "features": {
8
+ "id": {
9
+ "dtype": "string",
10
+ "id": null,
11
+ "_type": "Value"
12
+ },
13
+ "tokens": {
14
+ "feature": {
15
+ "dtype": "string",
16
+ "id": null,
17
+ "_type": "Value"
18
+ },
19
+ "length": -1,
20
+ "id": null,
21
+ "_type": "Sequence"
22
+ },
23
+ "ner_tags": {
24
+ "feature": {
25
+ "num_classes": 17,
26
+ "names": [
27
+ "O",
28
+ "B-Date",
29
+ "B-Location",
30
+ "B-Miscellaneous",
31
+ "B-Money",
32
+ "B-Organization",
33
+ "B-Percent",
34
+ "B-Person",
35
+ "B-Time",
36
+ "I-Date",
37
+ "I-Location",
38
+ "I-Miscellaneous",
39
+ "I-Money",
40
+ "I-Organization",
41
+ "I-Percent",
42
+ "I-Person",
43
+ "I-Time"
44
+ ],
45
+ "names_file": null,
46
+ "id": null,
47
+ "_type": "ClassLabel"
48
+ },
49
+ "length": -1,
50
+ "id": null,
51
+ "_type": "Sequence"
52
+ }
53
+ },
54
+ "post_processed": null,
55
+ "supervised_keys": null,
56
+ "builder_name": "mim-gold-ner",
57
+ "config_name": "mim-gold-ner",
58
+ "version": {
59
+ "version_str": "2.0.0",
60
+ "description": null,
61
+ "major": 2,
62
+ "minor": 0,
63
+ "patch": 0
64
+ },
65
+ "splits": {
66
+ "train": {
67
+ "name": "train",
68
+ "num_bytes": 14538308,
69
+ "num_examples": 46454,
70
+ "dataset_name": "mim-gold-ner"
71
+ },
72
+ "validation": {
73
+ "name": "validation",
74
+ "num_bytes": 1808522,
75
+ "num_examples": 6480,
76
+ "dataset_name": "mim-gold-ner"
77
+ },
78
+ "test": {
79
+ "name": "test",
80
+ "num_bytes": 1850644,
81
+ "num_examples": 5890,
82
+ "dataset_name": "mim-gold-ner"
83
+ }
84
+ },
85
+ "download_checksums": {
86
+ "https://vesteinn.is/train.txt": {
87
+ "num_bytes": 7149961,
88
+ "checksum": "e5dc575215d9479e5203616e33276add808b935a33e192b354e72b247ae016cf"
89
+ },
90
+ "https://vesteinn.is/dev.txt": {
91
+ "num_bytes": 873215,
92
+ "checksum": "0073ee5d446e6684bcce7d48af6294efc27fccf8f40492786d997f81929573e6"
93
+ },
94
+ "https://vesteinn.is/test.txt": {
95
+ "num_bytes": 926148,
96
+ "checksum": "19f23dab7209f5669fe2e359c973f9d7149d2ec9bbb9200e062e06bc2a58225a"
97
+ }
98
+ },
99
+ "download_size": 4858952,
100
+ "post_processing_size": null,
101
+ "dataset_size": 10252718,
102
+ "size_in_bytes": 15111670
103
+ }
104
+ }
icelandic-ner-MIM-GOLD-NER.py ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ # Modified by Vésteinn Snæbjarnarson 2021
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+
18
# Lint as: python3
# Fix: the previous docstring ("Introduction to the CoNLL-2003 Shared Task…")
# was left over from the conll2003 loader this script was adapted from.
"""MIM-GOLD-NER: a named entity recognition corpus for Icelandic (loading script)."""
20
+
21
import datasets


# Module-level logger, following the datasets library convention.
logger = datasets.logging.get_logger(__name__)
25
+
26
+
27
# Raw string so the BibTeX escape sequences survive verbatim: in a plain
# triple-quoted string, \' collapses to ' and \d is an (deprecated) invalid
# escape, corrupting names like Ing{\'o}lfsd{\'o}ttir and Gu{\dh}j{\'o}nsson.
_CITATION = r"""@misc{20.500.12537/42,
 title = {{MIM}-{GOLD}-{NER} – named entity recognition corpus},
 author = {Ing{\'o}lfsd{\'o}ttir, Svanhv{\'{\i}}t and Gu{\dh}j{\'o}nsson, {\'A}smundur Alma and Loftsson, Hrafn},
 url = {http://hdl.handle.net/20.500.12537/42},
 note = {{CLARIN}-{IS}},
 copyright = {Icelandic Gigaword Corpus Part1},
 year = {2020} }
"""
36
+
37
# Long-form corpus description surfaced via DatasetInfo.description.
# NOTE(review): "The Named Entity Corpus corpus" duplicates the word "corpus";
# left as-is because the same string appears in dataset_infos.json and the two
# must stay in sync.
_DESCRIPTION = """\
This Icelandic named entity (NE) corpus, MIM-GOLD-NER, is a version of the MIM-GOLD corpus tagged for NEs. Over 48 thousand NEs are tagged in this corpus of one million tokens, which can be used for training named entity recognizers for Icelandic.

The MIM-GOLD-NER corpus was developed at Reykjavik University in 2018–2020, funded by the Strategic Research and Development Programme for Language Technology (LT). Two LT students were in charge of the corpus annotation and of training named entity recognizers using machine learning methods.

A semi-automatic approach was used for annotating the corpus. Lists of Icelandic person names, location names, and company names were compiled and used for extracting and classifying as many named entities as possible. Regular expressions were then used to find certain numerical entities in the corpus. After this automatic pre-processing step, the whole corpus was reviewed manually to correct any errors. The corpus is tagged for eight named entity types:

PERSON – names of humans, animals and other beings, real or fictional.
LOCATION – names of locations, real or fictional, i.e. buildings, street and place names, both real and fictional. All geographical and geopolitical entities such as cities, countries, counties and regions, as well as planet names and other outer space entities.
ORGANIZATION – companies and other organizations, public or private, real or fictional. Schools, churches, swimming pools, community centers, musical groups, other affiliations.
MISCELLANEOUS – proper nouns that don’t belong to the previous three categories, such as products, books and movie titles, events, such as wars, sports tournaments, festivals, concerts, etc.
DATE – absolute temporal units of a full day or longer, such as days, months, years, centuries, both written numerically and alphabetically.
TIME – absolute temporal units shorter than a full day, such as seconds, minutes, or hours, both written numerically and alphabetically.
MONEY – exact monetary amounts in any currency, both written numerically and alphabetically.
PERCENT – percentages, both written numerically and alphabetically

MIM-GOLD-NER is intended for training of named entity recognizers for Icelandic. It is in the CoNLL format, and the position of each token within the NE is marked using the BIO tagging format. The corpus can be used in its entirety or by training on subsets of the text types that best fit the intended domain.

The Named Entity Corpus corpus is distributed with the same special user license as MIM-GOLD, which is based on the MIM license, since the texts in MIM-GOLD were sampled from the MIM corpus."""

# Remote location of the pre-split CoNLL-format data files.
_URL = "https://vesteinn.is/"
_TRAINING_FILE = "train.txt"
_DEV_FILE = "dev.txt"
_TEST_FILE = "test.txt"
61
+
62
+
63
class MIMGoldNERConfig(datasets.BuilderConfig):
    """BuilderConfig for the MIM-GOLD-NER dataset."""

    def __init__(self, **kwargs):
        """Create a MIM-GOLD-NER builder config.

        Args:
            **kwargs: keyword arguments forwarded to
                ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
72
+
73
+
74
class MIMGoldNER(datasets.GeneratorBasedBuilder):
    """MIM-GOLD-NER dataset builder.

    Each example carries the corpus' native fine-grained BIO tags
    (``ner_tags``) and a CoNLL-2003-style 4-type tag set
    (``conll_ner_tags``).
    """

    BUILDER_CONFIGS = [
        MIMGoldNERConfig(name="mim-gold-ner", version=datasets.Version("2.0.0"), description="MIM-GOLD-NER dataset"),
    ]

    def _info(self):
        """Declare the dataset schema: id, tokens and the two tag sequences."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Native MIM-GOLD-NER tag set (8 entity types, BIO scheme).
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-Date",
                                "B-Location",
                                "B-Miscellaneous",
                                "B-Money",
                                "B-Organization",
                                "B-Percent",
                                "B-Person",
                                "B-Time",
                                "I-Date",
                                "I-Location",
                                "I-Miscellaneous",
                                "I-Money",
                                "I-Organization",
                                "I-Percent",
                                "I-Person",
                                "I-Time"
                            ]
                        )
                    ),
                    # CoNLL-2003-style collapsed tag set (PER/ORG/LOC/MISC).
                    "conll_ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-MISC",
                                "I-MISC"
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="http://hdl.handle.net/20.500.12537/42",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three split files and wire them to dataset splits."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs from a tab-separated CoNLL file.

        Sentences are separated by blank lines; each data line is
        ``token<TAB>ner_tag[<TAB>conll_ner_tag]``.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            conll_ner_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                            "conll_ner_tags": conll_ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                        conll_ner_tags = []
                else:
                    # tokens are tab separated
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    try:
                        ner_tags.append(splits[1].rstrip())
                        # Bug fix: the CoNLL-style tag lives in its own
                        # (third) column. The original re-read column 1,
                        # whose labels ("B-Person", …) are not in the
                        # conll_ner_tags ClassLabel vocabulary above and
                        # would fail encoding. Fall back to column 1 for
                        # two-column rows to stay backward compatible —
                        # TODO confirm the data files carry three columns.
                        conll_ner_tags.append(
                            splits[2].rstrip() if len(splits) > 2 else splits[1].rstrip()
                        )
                    except IndexError:
                        # Malformed line: surface it before re-raising.
                        print(splits)
                        raise
            # Flush the last sentence (file may lack a trailing blank line).
            yield guid, {
                "id": str(guid),
                "tokens": tokens,
                "ner_tags": ner_tags,
                "conll_ner_tags": conll_ner_tags,
            }