initial commit
- dataset_infos.json +1 -0
- lsoie.py +151 -0
dataset_infos.json
ADDED
@@ -0,0 +1 @@
{"wiki": {"description": "\nThe Large Scale Open Information Extraction Dataset (LSOIE) is a dataset 20\ntimes larger than the next largest human-annotated Open Information Extraction\n(OIE) dataset. LSOIE is built upon the QA-SRL 2.0 dataset.\n", "citation": "@article{lsoie-2021,\n title={{LSOIE}: A Large-Scale Dataset for Supervised Open Information Extraction},\n author={{Solawetz}, Jacob and {Larson}, Stefan},\n journal={arXiv preprint arXiv:2101.11177},\n year={2021},\n url=\"https://arxiv.org/pdf/2101.11177.pdf\"\n}\n", "homepage": "https://github.com/Jacobsolawetz/large-scale-oie/", "license": "", "features": {"word_ids": {"feature": {"dtype": "int16", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "words": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "pred": {"dtype": "string", "id": null, "_type": "Value"}, "pred_ids": {"feature": {"dtype": "int16", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "head_pred_id": {"dtype": "int16", "id": null, "_type": "Value"}, "sent_id": {"dtype": "int16", "id": null, "_type": "Value"}, "run_id": {"dtype": "int16", "id": null, "_type": "Value"}, "label": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": {"input": "word_ids", "output": "label"}, "task_templates": null, "builder_name": "lsoie", "config_name": "wiki", "version": "0.0.0", "splits": {"train": {"name": "train", "num_bytes": 24938522, "num_examples": 46016, "dataset_name": "lsoie"}, "validation": {"name": "validation", "num_bytes": 2880854, "num_examples": 5269, "dataset_name": "lsoie"}, "test": {"name": "test", "num_bytes": 2840517, "num_examples": 5374, "dataset_name": "lsoie"}}, "download_checksums": {"https://github.com/Jacobsolawetz/large-scale-oie/raw/master/dataset_creation/lsoie_data/lsoie_data.zip": {"num_bytes": 19799926, "checksum": "0d189a3a8fef4b9f9efdad8faf0f53fc53805f9b2ad5354926e09c1449a00330"}}, "download_size": 19799926, "post_processing_size": null, "dataset_size": 30659893, "size_in_bytes": 50459819}}
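
Note that the metadata above only covers the "wiki" config, while the script below defines a second "sci" config. The infos file can be regenerated for all configs with the datasets CLI; the exact invocation below is an assumption and may vary by datasets version:

    datasets-cli test lsoie.py --save_infos --all_configs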
lsoie.py
ADDED
@@ -0,0 +1,151 @@
# -*- coding: utf-8 -*-
"""LSOIE: A Large-Scale Dataset for Supervised Open Information Extraction."""
import os
from zipfile import ZipFile

import datasets
from datasets.info import SupervisedKeysData

logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@article{lsoie-2021,
    title={{LSOIE}: A Large-Scale Dataset for Supervised Open Information Extraction},
    author={{Solawetz}, Jacob and {Larson}, Stefan},
    journal={arXiv preprint arXiv:2101.11177},
    year={2021},
    url="https://arxiv.org/pdf/2101.11177.pdf"
}
"""

_DESCRIPTION = """
The Large Scale Open Information Extraction Dataset (LSOIE) is a dataset 20
times larger than the next largest human-annotated Open Information Extraction
(OIE) dataset. LSOIE is built upon the QA-SRL 2.0 dataset.
"""

_URL = "https://github.com/Jacobsolawetz/large-scale-oie/"
_URLS = {
    "zip": _URL + "raw/master/dataset_creation/lsoie_data/lsoie_data.zip"
}
_ARCHIVE_FILES = [
    "lsoie_science_train.conll",
    "lsoie_science_dev.conll",
    "lsoie_science_test.conll",
    "lsoie_wiki_train.conll",
    "lsoie_wiki_dev.conll",
    "lsoie_wiki_test.conll",
]
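# The archive lays its files out as lsoie_{subset}_{split}.conll, so a config's
# `subset` value must be either "wiki" or "science" to resolve to the names above.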


class LsoieConfig(datasets.BuilderConfig):
    """BuilderConfig for LSOIE."""

    def __init__(self, subset="wiki", **kwargs):
        """BuilderConfig for LSOIE.
        Args:
            subset: str - either "wiki" or "science"
            **kwargs: keyword arguments forwarded to super.
        """
        super(LsoieConfig, self).__init__(**kwargs)
        self.subset = subset


class Lsoie(datasets.GeneratorBasedBuilder):
    """LSOIE: A Large-Scale Dataset for Supervised Open Information Extraction"""

    BUILDER_CONFIGS = [
        LsoieConfig(
            name="wiki",
            description="LSOIE dataset built over Wikipedia and Wikinews",
            subset="wiki",
        ),
        LsoieConfig(
            name="sci",
            description="LSOIE dataset built over the scientific domain",
            subset="science",
        ),
    ]

    DEFAULT_CONFIG_NAME = "wiki"
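    # With DEFAULT_CONFIG_NAME set, load_dataset() calls that omit a config
    # name fall back to the "wiki" subset.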

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "word_ids": datasets.Sequence(datasets.Value("int16")),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "pred": datasets.Value("string"),
                    "pred_ids": datasets.Sequence(datasets.Value("int16")),
                    "head_pred_id": datasets.Value("int16"),
                    "sent_id": datasets.Value("int16"),
                    "run_id": datasets.Value("int16"),
                    "label": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=SupervisedKeysData(input="word_ids", output="label"),
            homepage=_URL,
            citation=_CITATION,
            # There is no default task for open information extraction yet.
            # task_templates=[
            #     OpenInformationExtraction(
            #         question_column="question", context_column="context", answers_column="answers"
            #     )
            # ],
        )

    def _split_generators(self, dl_manager):
        downloaded_archive = dl_manager.download(_URLS)["zip"]
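        # The archive is only downloaded, not extracted: _generate_examples
        # reads the .conll members straight out of the zip with ZipFile, so
        # nothing needs to be unpacked to disk.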
        # name_pre = os.path.join("lsoie_data", "lsoie_") + self.config.subset + "_"
        name_pre = "lsoie_" + self.config.subset + "_"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "archive_path": downloaded_archive,
                    "file_name": name_pre + "train.conll",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "archive_path": downloaded_archive,
                    "file_name": name_pre + "dev.conll",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "archive_path": downloaded_archive,
                    "file_name": name_pre + "test.conll",
                },
            ),
        ]

    def _generate_examples(self, archive_path, file_name):
        """Yields the samples in a raw format."""
        logger.info("generating examples from archive: {}".format(archive_path))
        columns = {
            "word_ids": int,
            "words": str,
            "pred": str,
            "pred_ids": lambda x: [int(num) for num in x.strip("[]").split(",")],
            "head_pred_id": int,
            "sent_id": int,
            "run_id": int,
            "label": str,
        }
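        # Each non-blank .conll row is expected to carry the eight tab-separated
        # fields above, in that order, one token per row; sentences are
        # separated by blank lines. An illustrative (made-up) row:
        #   "0\tJohn\tloves\t[1]\t1\t0\t0\tA0-B"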
        list_columns = ["word_ids", "words", "label"]
        sep = "\t"
        key = 0
        sentence = dict()
        for column in list_columns:
            sentence[column] = []
        with ZipFile(archive_path) as zipfile:
            with zipfile.open("lsoie_data/" + file_name, mode="r") as file:
                for line in file:
                    line = line.decode("utf-8").strip("\n").split(sep=sep)
                    if line[0] == "":
                        yield key, sentence
                        key += 1
                        for column in list_columns:
                            sentence[column] = []
                        continue
                    for column, val in zip(columns.keys(), line):
                        val = columns[column](val)
                        if column in list_columns:
                            sentence[column].append(val)
                        else:
                            sentence[column] = val
                # yield the trailing sentence if the file does not end with a blank line
                if sentence["words"]:
                    yield key, sentence
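
Once this commit is in place, the dataset can be consumed through the standard datasets API. A minimal usage sketch; the bare repo id "lsoie" is an assumption here, substitute the namespace this script is actually published under:

    from datasets import load_dataset

    # "wiki" is the default config; pass "sci" for the science subset
    dataset = load_dataset("lsoie", "wiki")
    example = dataset["train"][0]
    print(example["words"])   # sentence tokens
    print(example["pred"])    # predicate this extraction is anchored on
    print(example["label"])   # one OIE tag per token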