tanfiona committed on
Commit
a77db27
1 Parent(s): ca703d2

Update causenet_wiki.py

Files changed (1)
  1. causenet_wiki.py +97 -0
causenet_wiki.py CHANGED
@@ -0,0 +1,97 @@
+ import json
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{heindorf2020causenet,
+   author    = {Stefan Heindorf and
+                Yan Scholten and
+                Henning Wachsmuth and
+                Axel-Cyrille Ngonga Ngomo and
+                Martin Potthast},
+   title     = {CauseNet: Towards a Causality Graph Extracted from the Web},
+   booktitle = {CIKM},
+   publisher = {ACM},
+   year      = {2020}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Wikipedia data crawled for the CIKM 2020 paper
+ 'CauseNet: Towards a Causality Graph Extracted from the Web.'
+ """
+ _URL = "https://github.com/causenet-org/CIKM-20"
+
+ # Tab-separated dump of the Wikipedia extraction released by the CauseNet authors.
+ _TRAIN_DOWNLOAD_URL = "https://groups.uni-paderborn.de/wdqa/causenet/causality-graphs/extraction/wikipedia/wikipedia-extraction.tsv"
+
+
+ class CauseNetWikiCorpus(datasets.GeneratorBasedBuilder):
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 "cause_word": datasets.Value("string"),
+                 "cause_id": datasets.Value("int64"),
+                 "effect_word": datasets.Value("string"),
+                 "effect_id": datasets.Value("int64"),
+                 "pattern": datasets.Value("string"),
+                 "sentence": datasets.Value("string"),
+                 "dependencies": datasets.Value("string"),
+             }),
+             homepage=_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path})
+         ]
+
+     def is_valid_article(self, title):
+         """Keep only regular articles: skip meta pages, lists, and disambiguations."""
+         forbidden_title_parts = ['Wikipedia:', 'Template:', 'File:',
+                                  'Portal:', 'Category:', 'Draft:',
+                                  'List of', 'disambiguation']
+         return not any(part in title for part in forbidden_title_parts)
+
+     def _generate_examples(self, filepath):
+         """
+         Generate examples from the tab-separated extraction file.
+         Each 'wikipedia_sentence' row has 11 tab-separated columns; this loader
+         uses the article title (column 2), the JSON-encoded sentence (column 7)
+         and dependencies (column 9), and the pattern matches (column 10).
+         """
+         key = 0
+         with open(filepath, encoding="utf-8") as f:
+             for line in f:
+                 parts = line.strip().split('\t')
+                 if parts[0] != 'wikipedia_sentence':
+                     continue
+                 assert len(parts) == 11
+
+                 if not self.is_valid_article(parts[2]):
+                     continue
+
+                 # A sentence can hold several cause-effect matches; each match is
+                 # emitted as its own example, so keys are counted per example
+                 # (not per line) to keep them unique.
+                 for match in json.loads(parts[10]):
+                     sentence_data = {
+                         "cause_word": match['Cause'][0],
+                         "cause_id": match['Cause'][1],
+                         "effect_word": match['Effect'][0],
+                         "effect_id": match['Effect'][1],
+                         "pattern": match['Pattern'],
+                         "sentence": json.loads(parts[7]),
+                         "dependencies": json.loads(parts[9])
+                     }
+                     yield key, sentence_data
+                     key += 1
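
As a sanity check, the loader can be exercised end to end with a minimal sketch like the one below; the relative script path is an assumption (any local copy of causenet_wiki.py works), and the printed fields simply illustrate the schema declared in _info:

import datasets

# Minimal usage sketch, assuming this script is saved locally as causenet_wiki.py.
# The first call downloads and parses the full wikipedia-extraction.tsv, so it can
# take a while and use noticeable disk space.
ds = datasets.load_dataset("./causenet_wiki.py", split="train")

example = ds[0]
print(example["cause_word"], "->", example["effect_word"])  # extracted causal pair
print(example["pattern"])                                   # linguistic pattern that matched
print(example["sentence"])                                  # source sentence from Wikipedia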