asahi417 committed · Commit f875294 · 1 Parent(s): 92bc586
multi_domain_document_classification.py CHANGED
@@ -0,0 +1,84 @@
+ """Multi-domain document classification dataset used in https://arxiv.org/pdf/2004.10964.pdf"""
+ import json
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _DESCRIPTION = """Multi-domain document classification dataset used in https://arxiv.org/pdf/2004.10964.pdf"""
+ _NAME = "multi_domain_document_classification"
+ _VERSION = "0.0.0"
+ _CITATION = """
+ @inproceedings{dontstoppretraining2020,
+     author = {Suchin Gururangan and Ana Marasović and Swabha Swayamdipta and Kyle Lo and Iz Beltagy and Doug Downey and Noah A. Smith},
+     title = {Don't Stop Pretraining: Adapt Language Models to Domains and Tasks},
+     year = {2020},
+     booktitle = {Proceedings of ACL},
+ }
+ """
+
+ _HOME_PAGE = "https://github.com/asahi417/m3"
+ _URL = f'https://huggingface.co/datasets/asahi417/{_NAME}/raw/main/dataset'
+ _DATA_TYPE = ["chemprot", "citation_intent", "hyperpartisan_news", "rct-sample", "sciie"]
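+ # These five domains match classification tasks used in the paper: ChemProt and
+ # a PubMed RCT sample (biomedical), ACL-ARC citation intent and SciERC
+ # (computer science), and SemEval hyperpartisan news detection (news).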
+ _URLS = {
+     k: {
+         str(datasets.Split.TEST): [f'{_URL}/{k}/test.jsonl'],
+         str(datasets.Split.TRAIN): [f'{_URL}/{k}/train.jsonl'],
+         str(datasets.Split.VALIDATION): [f'{_URL}/{k}/valid.jsonl']
+     }
+     for k in _DATA_TYPE
+ }
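+ # For example, the resolved train-split URL for the "chemprot" config is
+ # https://huggingface.co/datasets/asahi417/multi_domain_document_classification/raw/main/dataset/chemprot/train.jsonl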
+
+
+ class MultiDomainDocumentClassificationConfig(datasets.BuilderConfig):
+     """BuilderConfig for one document-classification domain."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(**kwargs)
+
+
+ class MultiDomainDocumentClassification(datasets.GeneratorBasedBuilder):
+     """Multi-domain document classification dataset."""
+
+     # One config per domain; the domain is selected at load time via the config name.
+     BUILDER_CONFIGS = [
+         MultiDomainDocumentClassificationConfig(
+             name=k, version=datasets.Version(_VERSION), description=_DESCRIPTION
+         ) for k in _DATA_TYPE
+     ]
+
+     def _split_generators(self, dl_manager):
+         # download_and_extract mirrors the structure of its argument, so the
+         # result maps each split name to a list of local file paths.
+         downloaded_file = dl_manager.download_and_extract(_URLS[self.config.name])
+         return [
+             datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
+             for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
+         ]
+
+     def _generate_examples(self, filepaths):
+         _key = 0
+         for filepath in filepaths:
+             logger.info(f"generating examples from = {filepath}")
+             with open(filepath, encoding="utf-8") as f:
+                 # Each non-empty line of the JSONL file is one example.
+                 for line in f:
+                     if not line.strip():
+                         continue
+                     yield _key, json.loads(line)
+                     _key += 1
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             # Each JSONL record is expected to carry a raw document and its class
+             # label ("text"/"label", per the Don't Stop Pretraining data release),
+             # not token/tag sequences.
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "label": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOME_PAGE,
+             citation=_CITATION,
+         )
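
A minimal usage sketch, assuming the script is served from the asahi417/multi_domain_document_classification repo and that the installed datasets version accepts remote loading scripts (newer releases may require passing trust_remote_code=True):

import datasets

# Pick one of the five domain configs, e.g. "citation_intent".
dataset = datasets.load_dataset("asahi417/multi_domain_document_classification", "citation_intent")

# Splits follow _split_generators: "train", "validation", "test".
print(dataset["train"][0])  # one JSONL record, e.g. {"text": "...", "label": "..."}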