# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FabNER is a manufacturing text corpus of 350,000+ words for Named Entity Recognition."""
import datasets
# BibTeX citation for the FabNER paper (Kumar & Starly, Journal of Intelligent Manufacturing, 2022).
_CITATION = """\
@article{DBLP:journals/jim/KumarS22,
  author    = {Aman Kumar and
               Binil Starly},
  title     = {"FabNER": information extraction from manufacturing process science
               domain literature using named entity recognition},
  journal   = {J. Intell. Manuf.},
  volume    = {33},
  number    = {8},
  pages     = {2393--2407},
  year      = {2022},
  url       = {https://doi.org/10.1007/s10845-021-01807-x},
  doi       = {10.1007/s10845-021-01807-x},
  timestamp = {Sun, 13 Nov 2022 17:52:57 +0100},
  biburl    = {https://dblp.org/rec/journals/jim/KumarS22.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
# Description shown on the dataset page.
_DESCRIPTION = """\
FabNER is a manufacturing text corpus of 350,000+ words for Named Entity Recognition.
It is a collection of abstracts obtained from Web of Science through known journals available in manufacturing
process science research.
Every word is annotated with one of twelve entity categories: Material (MATE), Manufacturing Process (MANP),
Machine/Equipment (MACEQ), Application (APPL), Features (FEAT), Mechanical Properties (PRO), Characterization (CHAR),
Parameters (PARA), Enabling Technology (ENAT), Concept/Principles (CONPRI), Manufacturing Standards (MANS) and
BioMedical (BIOP). Output tags follow the BIOES scheme: B=Beginning, I=Intermediate, O=Outside, E=End, S=Single.
"""
_HOMEPAGE = "https://figshare.com/articles/dataset/Dataset_NER_Manufacturing_-_FabNER_Information_Extraction_from_Manufacturing_Process_Science_Domain_Literature_Using_Named_Entity_Recognition/14782407"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
    "train": "https://figshare.com/ndownloader/files/28405854/S2-train.txt",
    "validation": "https://figshare.com/ndownloader/files/28405857/S3-val.txt",
    "test": "https://figshare.com/ndownloader/files/28405851/S1-test.txt",
}
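# A minimal usage sketch. The repository id "DFKI-SLT/fabner" below is an assumption -- replace it with the
# actual Hub repo id or a local path to this script; depending on your `datasets` version you may also need
# to pass trust_remote_code=True. Kept as a comment so nothing runs at import time:
#
#     from datasets import load_dataset
#
#     fabner = load_dataset("DFKI-SLT/fabner", "fabner_bio")
#     example = fabner["train"][0]
#     print(example["tokens"])
#     print(example["ner_tags"])  # integer class ids; the label names live in features["ner_tags"]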
def map_fabner_labels(string_tag):
    """Map a FabNER BIOES tag (e.g. "B-MATE") to a coarse Text2Tech class."""
    # Strip the "B-"/"I-"/"E-"/"S-" prefix; the plain "O" tag becomes "" and falls through to the final branch.
    tag = string_tag[2:]
    # MATERIAL (FABNER)
    if tag == "MATE":
        return "Material"
    # MANUFACTURING PROCESS (FABNER)
    elif tag == "MANP":
        return "Method"
    # MACHINE/EQUIPMENT, MECHANICAL PROPERTIES, CHARACTERIZATION, ENABLING TECHNOLOGY (FABNER)
    elif tag in ["MACEQ", "PRO", "CHAR", "ENAT"]:
        return "Technological System"
    # APPLICATION (FABNER)
    elif tag == "APPL":
        return "Technical Field"
    # FEATURES, PARAMETERS, CONCEPT/PRINCIPLES, MANUFACTURING STANDARDS, BIOMEDICAL, O (FABNER)
    else:
        return "O"
class FabNER(datasets.GeneratorBasedBuilder):
    """FabNER is a manufacturing text corpus of 350,000+ words for Named Entity Recognition."""
    VERSION = datasets.Version("1.2.0")
    # Each configuration exposes the same data with a different tagging scheme.
    # Load one of them by passing its name as the second argument to datasets.load_dataset, e.g.
    # data = datasets.load_dataset('<path_or_repo_of_this_dataset>', 'fabner_bio')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="fabner", version=VERSION,
                               description="The FabNER dataset with the original BIOES tagging format"),
        datasets.BuilderConfig(name="fabner_bio", version=VERSION,
                               description="The FabNER dataset with BIO tagging format"),
        datasets.BuilderConfig(name="fabner_simple", version=VERSION,
                               description="The FabNER dataset with plain entity-type labels (no BIOES prefixes)"),
        datasets.BuilderConfig(name="text2tech", version=VERSION,
                               description="The FabNER dataset mapped to the Text2Tech tag set"),
    ]
    DEFAULT_CONFIG_NAME = "fabner"
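    # How one (purely illustrative) entity span would be tagged under each configuration, assuming
    # "selective laser melting" were annotated as a Manufacturing Process (MANP):
    #
    #     fabner:        B-MANP  I-MANP  E-MANP
    #     fabner_bio:    B-MANP  I-MANP  I-MANP
    #     fabner_simple: MANP    MANP    MANP
    #     text2tech:     Method  Method  Method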
    def _info(self):
        entity_types = [
            "MATE",  # Material
            "MANP",  # Manufacturing Process
            "MACEQ",  # Machine/Equipment
            "APPL",  # Application
            "FEAT",  # Engineering Features
            "PRO",  # Mechanical Properties
            "CHAR",  # Process Characterization
            "PARA",  # Process Parameters
            "ENAT",  # Enabling Technology
            "CONPRI",  # Concept/Principles
            "MANS",  # Manufacturing Standards
            "BIOP",  # BioMedical
        ]
        if self.config.name == "text2tech":
            class_labels = ["O", "Technological System", "Method", "Material", "Technical Field"]
        elif self.config.name == "fabner":
            class_labels = ["O"]
            for entity_type in entity_types:
                class_labels.extend(
                    [
                        "B-" + entity_type,
                        "I-" + entity_type,
                        "E-" + entity_type,
                        "S-" + entity_type,
                    ]
                )
        elif self.config.name == "fabner_bio":
            class_labels = ["O"]
            for entity_type in entity_types:
                class_labels.extend(
                    [
                        "B-" + entity_type,
                        "I-" + entity_type,
                    ]
                )
        else:
            class_labels = ["O"] + entity_types
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=class_labels
                    )
                ),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,  # Defined above because the label set differs between the configurations.
            # If there's a common (input, target) tuple from the features, specify it here;
            # it is used when as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
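    # A small sketch of how the ClassLabel feature defined above converts between tag ids and tag strings
    # (illustrative only; "ds" stands for a split loaded via datasets.load_dataset):
    #
    #     labels = ds.features["ner_tags"].feature
    #     labels.int2str(0)         -> "O"
    #     labels.str2int("B-MATE")  -> the integer id of "B-MATE" in the "fabner" configuration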
    def _split_generators(self, dl_manager):
        # If several configurations are possible (listed in BUILDER_CONFIGS), the one selected by the user is in self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It accepts any type or nested list/dict and gives back the same structure with the URLs replaced by paths to local files.
        # By default archives are extracted and a path to the cached extraction folder is returned instead of the archive.
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepath": downloaded_files[str(i)]})
                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepath):
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line == "" or line == "\n":
                    # A blank line marks the end of a sentence: emit the accumulated example.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Each non-empty line holds one token and its tag, separated by a single space.
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    ner_tag = splits[1].rstrip()
                    if self.config.name == "fabner_simple":
                        # Keep only the entity type, dropping the BIOES prefix.
                        if ner_tag != "O":
                            ner_tag = ner_tag.split("-")[1]
                    elif self.config.name == "fabner_bio":
                        # Collapse BIOES to BIO: S- becomes B-, E- becomes I-.
                        ner_tag = ner_tag.replace("S-", "B-").replace("E-", "I-")
                    elif self.config.name == "text2tech":
                        ner_tag = map_fabner_labels(ner_tag)
                    ner_tags.append(ner_tag)
            # Yield the last example if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
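# Expected layout of the raw FabNER files parsed above: one token and its BIOES tag per line, separated by a
# single space, with a blank line between sentences. An illustrative (made-up) snippet:
#
#     Selective B-MANP
#     laser I-MANP
#     melting E-MANP
#     of O
#     titanium S-MATE
#
# For the default "fabner" configuration, _generate_examples yields this sentence as:
#
#     {"id": "0",
#      "tokens": ["Selective", "laser", "melting", "of", "titanium"],
#      "ner_tags": ["B-MANP", "I-MANP", "E-MANP", "O", "S-MATE"]}
#
# (the ClassLabel feature defined in _info later encodes the tag strings as integer ids).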