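"""Hugging Face `datasets` loading script for the CauseNet Wikipedia corpus.

Yields cause-effect pairs extracted from Wikipedia sentences, as released
with the CIKM 2020 paper 'CauseNet: Towards a Causality Graph Extracted
from the Web'.
"""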
import json

import datasets


_CITATION = """\
@inproceedings{heindorf2020causenet,
  author    = {Stefan Heindorf and
               Yan Scholten and
               Henning Wachsmuth and
               Axel-Cyrille Ngonga Ngomo and
               Martin Potthast},
  title     = {CauseNet: Towards a Causality Graph Extracted from the Web},
  booktitle = {CIKM},
  publisher = {ACM},
  year      = {2020}
}
"""

_DESCRIPTION = """\
Crawled Wikipedia data from the CIKM 2020 paper
'CauseNet: Towards a Causality Graph Extracted from the Web.'
"""
_URL = "https://github.com/causenet-org/CIKM-20"

# export=download forces Google Drive to serve the file instead of displaying it
_TRAIN_DOWNLOAD_URL = "https://groups.uni-paderborn.de/wdqa/causenet/causality-graphs/extraction/wikipedia/wikipedia-extraction.tsv"
_DEMO_DOWNLOAD_URL = "https://drive.google.com/uc?export=download&id=11gKbdn77ngBJr2C1iCK-YUDu9mWbXWx2"
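# The TRAIN file is the full Wikipedia extraction TSV from the CauseNet
# release; the DEMO file is a smaller sample hosted on Google Drive. Both
# are assumed to share the TSV layout parsed in _generate_examples below.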

class CauseNetWikiCorpus(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "cause_word": datasets.Value("string"),
                "cause_id": datasets.Value("int64"),
                "effect_word": datasets.Value("string"),
                "effect_id": datasets.Value("int64"),
                "pattern": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                "dependencies": datasets.Value("string"),
            }),
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
        demo_path = dl_manager.download_and_extract(_DEMO_DOWNLOAD_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
            datasets.SplitGenerator(name="demo", gen_kwargs={"filepath": demo_path})
        ]

    def is_valid_article(self, title):
        """Filter out meta pages, lists, and disambiguation pages by title."""
        forbidden_title_parts = ['Wikipedia:', 'Template:', 'File:',
                                 'Portal:', 'Category:', 'Draft:',
                                 'List of', 'disambiguation']
        return not any(part in title for part in forbidden_title_parts)

    def _generate_examples(self, filepath):
        """
        Generate one example per cause-effect match.

        Each 'wikipedia_sentence' row of the extraction TSV carries 11
        tab-separated fields: the article title sits at index 2, the
        JSON-encoded sentence at index 7, the dependencies string at
        index 9, and a JSON list of pattern matches at index 10.
        """
        with open(filepath, encoding="utf-8") as f:
            for line_id, line in enumerate(f):
                parts = line.strip().split('\t')
                if parts[0] != 'wikipedia_sentence':
                    continue
                assert len(parts) == 11

                if not self.is_valid_article(parts[2]):
                    continue

                # Key each match uniquely so that a line with several
                # matches yields several examples.
                for match_id, match in enumerate(json.loads(parts[10])):
                    yield f"{line_id}_{match_id}", {
                        "cause_word": match['Cause'][0],
                        "cause_id": match['Cause'][1],
                        "effect_word": match['Effect'][0],
                        "effect_id": match['Effect'][1],
                        "pattern": match['Pattern'],
                        "sentence": json.loads(parts[7]),
                        "dependencies": json.loads(parts[9]),
                    }
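

# A minimal smoke test, run only when this script is executed directly. This
# is a hedged sketch: it assumes a `datasets` release that can still load a
# local script by path (newer versions may require trust_remote_code=True or
# may have dropped script loading) and that the URLs above remain reachable.
if __name__ == "__main__":
    demo = datasets.load_dataset(__file__, split="demo")
    print(demo[0]["cause_word"], "->", demo[0]["effect_word"])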