system HF staff committed on
Commit 8e3b9cf (0 parents)

Update files from the datasets library (from 1.2.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

Files changed (5)
  1. .gitattributes +27 -0
  2. README.md +155 -0
  3. dataset_infos.json +1 -0
  4. dummy/0.0.0/dummy_data.zip +3 -0
  5. turkish_ner.py +170 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,155 @@
+ ---
+ annotations_creators:
+ - machine-generated
+ language_creators:
+ - expert-generated
+ languages:
+ - tr
+ licenses:
+ - cc-by-4-0
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 100K<n<1M
+ source_datasets:
+ - original
+ task_categories:
+ - structure-prediction
+ task_ids:
+ - named-entity-recognition
+ ---
+
+
+ # Dataset Card for turkish_ner
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** http://arxiv.org/abs/1702.02363
+ - **Repository:** [Needs More Information]
+ - **Paper:** http://arxiv.org/abs/1702.02363
+ - **Leaderboard:** [Needs More Information]
+ - **Point of Contact:** [email protected]
+
+ ### Dataset Summary
+
+ Automatically annotated Turkish corpus for named entity recognition and text categorization using large-scale gazetteers. The constructed gazetteers contain approximately 300K entities with thousands of fine-grained entity types under 25 different domains.
+
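+ A minimal loading sketch (assuming the `datasets` library, version 1.2.0 or later, is installed; field names follow the schema in `dataset_infos.json`):
+
+ ```python
+ from datasets import load_dataset
+
+ # The dataset ships with a single training split
+ train = load_dataset("turkish_ner", split="train")
+
+ # Each example has an id, the sentence tokens, a coarse domain label, and IOB2 NER tags
+ example = train[0]
+ print(example["tokens"])
+ print(example["ner_tags"])
+ ```
+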
+ ### Supported Tasks and Leaderboards
+
+ [Needs More Information]
+
+ ### Languages
+
+ Turkish
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ [More Information Needed]
+
+ ### Data Fields
+
+ - `id`: a `string` identifier for the example
+ - `tokens`: a sequence of `string` word tokens
+ - `domain`: a `ClassLabel` over 25 coarse domains (e.g. `architecture`, `basketball`, `book`, `music`, `tv`)
+ - `ner_tags`: a sequence of `ClassLabel` tags in IOB2 format: `O`, `B-PERSON`, `I-PERSON`, `B-ORGANIZATION`, `I-ORGANIZATION`, `B-LOCATION`, `I-LOCATION`, `B-MISC`, `I-MISC`
+
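+ The `domain` and `ner_tags` fields are stored as integer class ids; a short decoding sketch (same assumptions as in the loading example above):
+
+ ```python
+ from datasets import load_dataset
+
+ train = load_dataset("turkish_ner", split="train")
+
+ # ClassLabel.int2str maps the stored integer ids back to their string names
+ domain_feature = train.features["domain"]
+ ner_feature = train.features["ner_tags"].feature
+ print(domain_feature.int2str(train[0]["domain"]))
+ print([ner_feature.int2str(tag) for tag in train[0]["ner_tags"]])
+ ```
+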
+ ### Data Splits
+
+ There is only a training split, containing 532,629 examples.
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ H. Bahadir Sahin, Caglar Tirkaz, Eray Yildiz, Mustafa Tolga Eren and Omer Ozan Sonmez
+
+ ### Licensing Information
+
+ Creative Commons Attribution 4.0 International
+
+ ### Citation Information
+
+ @article{DBLP:journals/corr/SahinTYES17,
+   author    = {H. Bahadir Sahin and
+                Caglar Tirkaz and
+                Eray Yildiz and
+                Mustafa Tolga Eren and
+                Omer Ozan Sonmez},
+   title     = {Automatically Annotated Turkish Corpus for Named Entity Recognition
+                and Text Categorization using Large-Scale Gazetteers},
+   journal   = {CoRR},
+   volume    = {abs/1702.02363},
+   year      = {2017},
+   url       = {http://arxiv.org/abs/1702.02363},
+   archivePrefix = {arXiv},
+   eprint    = {1702.02363},
+   timestamp = {Mon, 13 Aug 2018 16:46:36 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/SahinTYES17.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "Turkish Wikipedia Named-Entity Recognition and Text Categorization\n(TWNERTC) dataset is a collection of automatically categorized and annotated\nsentences obtained from Wikipedia. The authors constructed large-scale\ngazetteers by using a graph crawler algorithm to extract\nrelevant entity and domain information\nfrom a semantic knowledge base, Freebase.\nThe constructed gazetteers contains approximately\n300K entities with thousands of fine-grained entity types\nunder 77 different domains.\n", "citation": "@InProceedings@article{DBLP:journals/corr/SahinTYES17,\n author = {H. Bahadir Sahin and\n Caglar Tirkaz and\n Eray Yildiz and\n Mustafa Tolga Eren and\n Omer Ozan Sonmez},\n title = {Automatically Annotated Turkish Corpus for Named Entity Recognition\n and Text Categorization using Large-Scale Gazetteers},\n journal = {CoRR},\n volume = {abs/1702.02363},\n year = {2017},\n url = {http://arxiv.org/abs/1702.02363},\n archivePrefix = {arXiv},\n eprint = {1702.02363},\n timestamp = {Mon, 13 Aug 2018 16:46:36 +0200},\n biburl = {https://dblp.org/rec/journals/corr/SahinTYES17.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://data.mendeley.com/datasets/cdcztymf4k/1", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "domain": {"num_classes": 25, "names": ["architecture", "basketball", "book", "business", "education", "fictional_universe", "film", "food", "geography", "government", "law", "location", "military", "music", "opera", "organization", "people", "religion", "royalty", "soccer", "sports", "theater", "time", "travel", "tv"], "names_file": null, "id": null, "_type": "ClassLabel"}, "ner_tags": {"feature": {"num_classes": 9, "names": ["O", "B-PERSON", "I-PERSON", "B-ORGANIZATION", "I-ORGANIZATION", "B-LOCATION", "I-LOCATION", "B-MISC", "I-MISC"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "turkish_ner", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 177658278, "num_examples": 532629, "dataset_name": "turkish_ner"}}, "download_checksums": {"https://data.mendeley.com/public-files/datasets/cdcztymf4k/files/5557ef78-7d53-4a01-8241-3173c47bbe10/file_downloaded": {"num_bytes": 204393976, "checksum": "e03e2867a225d63f0139dd4ced028e5da795a8a48e140ad4c17999a8560dbc57"}}, "download_size": 204393976, "post_processing_size": null, "dataset_size": 177658278, "size_in_bytes": 382052254}}
dummy/0.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ce14f1ea12dda073ba35fe0381e477c03c176f8c4d192d87b72977846b89c9d
+ size 5801
turkish_ner.py ADDED
@@ -0,0 +1,170 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """TWNERTC: an automatically annotated Turkish corpus for named entity recognition and text categorization."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import logging
+ import os
+
+ import datasets
+
+
+ # Citation of the TWNERTC paper (Sahin et al., 2017)
+ _CITATION = """\
+ @article{DBLP:journals/corr/SahinTYES17,
+   author    = {H. Bahadir Sahin and
+                Caglar Tirkaz and
+                Eray Yildiz and
+                Mustafa Tolga Eren and
+                Omer Ozan Sonmez},
+   title     = {Automatically Annotated Turkish Corpus for Named Entity Recognition
+                and Text Categorization using Large-Scale Gazetteers},
+   journal   = {CoRR},
+   volume    = {abs/1702.02363},
+   year      = {2017},
+   url       = {http://arxiv.org/abs/1702.02363},
+   archivePrefix = {arXiv},
+   eprint    = {1702.02363},
+   timestamp = {Mon, 13 Aug 2018 16:46:36 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/SahinTYES17.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Turkish Wikipedia Named-Entity Recognition and Text Categorization
+ (TWNERTC) dataset is a collection of automatically categorized and annotated
+ sentences obtained from Wikipedia. The authors constructed large-scale
+ gazetteers by using a graph crawler algorithm to extract
+ relevant entity and domain information
+ from a semantic knowledge base, Freebase.
+ The constructed gazetteers contains approximately
+ 300K entities with thousands of fine-grained entity types
+ under 77 different domains.
+ """
+
+ _HOMEPAGE = "https://data.mendeley.com/datasets/cdcztymf4k/1"
+
+ _LICENSE = "Creative Commons Attribution 4.0 International"
+
+ # Mendeley Data mirror of the coarse-grained, domain-independent, noise-reduced TWNERTC dump
+ _URL = "https://data.mendeley.com/public-files/datasets/cdcztymf4k/files/5557ef78-7d53-4a01-8241-3173c47bbe10/file_downloaded"
+
+
+ _FILE_NAME_ZIP = "TWNERTC_TC_Coarse Grained NER_DomainIndependent_NoiseReduction.zip"
+ _FILE_NAME = "TWNERTC_TC_Coarse Grained NER_DomainIndependent_NoiseReduction.DUMP"
+
+
+ class TurkishNER(datasets.GeneratorBasedBuilder):
+     """TWNERTC: coarse-grained Turkish NER with domain labels, derived from Wikipedia and Freebase."""
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "domain": datasets.ClassLabel(
+                         names=[
+                             "architecture",
+                             "basketball",
+                             "book",
+                             "business",
+                             "education",
+                             "fictional_universe",
+                             "film",
+                             "food",
+                             "geography",
+                             "government",
+                             "law",
+                             "location",
+                             "military",
+                             "music",
+                             "opera",
+                             "organization",
+                             "people",
+                             "religion",
+                             "royalty",
+                             "soccer",
+                             "sports",
+                             "theater",
+                             "time",
+                             "travel",
+                             "tv",
+                         ]
+                     ),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-PERSON",
+                                 "I-PERSON",
+                                 "B-ORGANIZATION",
+                                 "I-ORGANIZATION",
+                                 "B-LOCATION",
+                                 "I-LOCATION",
+                                 "B-MISC",
+                                 "I-MISC",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # The downloaded archive contains the coarse-grained zip, which in turn holds
+         # the .DUMP file, hence the second extraction step.
+         data_dir = dl_manager.extract(os.path.join(dl_manager.download_and_extract(_URL), _FILE_NAME_ZIP))
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, _FILE_NAME),
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """Yields examples from the tab-separated DUMP file."""
+         logging.info("⏳ Generating examples from = %s", filepath)
+
+         with open(filepath, encoding="utf-8") as f:
+             id_ = -1
+             for line in f:
+                 if line == "" or line == "\n":
+                     continue
+                 # Each line is "<domain>\t<space-separated NER tags>\t<space-separated tokens>"
+                 splits = line.split("\t")
+                 id_ += 1
+                 yield id_, {
+                     "id": str(id_),
+                     "domain": splits[0],
+                     "tokens": splits[2].split(" "),
+                     "ner_tags": splits[1].split(" "),
+                 }