"""Hugging Face `datasets` loading script for the CauseNet Wikipedia extraction corpus (CIKM 2020)."""

import json

import datasets
|
_CITATION = """\
@inproceedings{heindorf2020causenet,
  author    = {Stefan Heindorf and
               Yan Scholten and
               Henning Wachsmuth and
               Axel-Cyrille Ngonga Ngomo and
               Martin Potthast},
  title     = {CauseNet: Towards a Causality Graph Extracted from the Web},
  booktitle = {CIKM},
  publisher = {ACM},
  year      = {2020}
}
"""

_DESCRIPTION = """\
Crawled Wikipedia data from the CIKM 2020 paper
'CauseNet: Towards a Causality Graph Extracted from the Web.'
"""

_URL = "https://github.com/causenet-org/CIKM-20"

_TRAIN_DOWNLOAD_URL = "https://groups.uni-paderborn.de/wdqa/causenet/causality-graphs/extraction/wikipedia/wikipedia-extraction.tsv"
_DEMO_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=11gKbdn77ngBJr2C1iCK-YUDu9mWbXWx2"
|
|
class CauseNetWikiCorpus(datasets.GeneratorBasedBuilder):
    """Cause-effect pairs extracted from Wikipedia sentences by CauseNet."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "cause_word": datasets.Value("string"),
                    "cause_id": datasets.Value("int64"),
                    "effect_word": datasets.Value("string"),
                    "effect_id": datasets.Value("int64"),
                    "pattern": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "dependencies": datasets.Value("string"),
                }
            ),
            homepage=_URL,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
        demo_path = dl_manager.download_and_extract(_DEMO_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name="demo", gen_kwargs={"filepath": demo_path}),
        ]
|
    def is_valid_article(self, title):
        """Keep only regular articles: skip meta namespaces, lists, and disambiguation pages."""
        forbidden_title_parts = ['Wikipedia:', 'Template:', 'File:',
                                 'Portal:', 'Category:', 'Draft:',
                                 'List of', 'disambiguation']
        return not any(part in title for part in forbidden_title_parts)
|
    def _generate_examples(self, filepath):
        """Generate examples from the tab-separated extraction file.

        Each 'wikipedia_sentence' row has 11 tab-separated fields; a single
        sentence can contain several cause-effect matches, so every match is
        yielded as its own example under a unique key.
        """
        key = 0
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                parts = line.rstrip('\n').split('\t')
                if parts[0] != 'wikipedia_sentence':
                    continue
                assert len(parts) == 11

                # parts[2] holds the article title.
                if not self.is_valid_article(parts[2]):
                    continue

                # parts[10] is a JSON list of cause-effect matches.
                for match in json.loads(parts[10]):
                    yield key, {
                        "cause_word": match['Cause'][0],
                        "cause_id": match['Cause'][1],
                        "effect_word": match['Effect'][0],
                        "effect_id": match['Effect'][1],
                        "pattern": match['Pattern'],
                        "sentence": json.loads(parts[7]),
                        # Kept as the raw JSON string so it matches the declared
                        # string feature; consumers can json.loads() it themselves.
                        "dependencies": parts[9],
                    }
                    key += 1
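

# A minimal usage sketch (assumption: this script is saved locally as
# `causenet_wiki_corpus.py`; the path below is illustrative, not part of
# the original source):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("./causenet_wiki_corpus.py", split="train")
#     example = dataset[0]
#     print(example["cause_word"], "->", example["effect_word"])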