"""Multi domain document classification dataset used in [https://arxiv.org/pdf/2004.10964.pdf](https://arxiv.org/pdf/2004.10964.pdf)"""
import json

import datasets

logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """Multi-domain document classification dataset used in https://arxiv.org/pdf/2004.10964.pdf."""
_NAME = "multi_domain_document_classification"
_VERSION = "0.2.3"
_CITATION = """
@inproceedings{dontstoppretraining2020,
 author = {Suchin Gururangan and Ana Marasović and Swabha Swayamdipta and Kyle Lo and Iz Beltagy and Doug Downey and Noah A. Smith},
 title = {Don't Stop Pretraining: Adapt Language Models to Domains and Tasks},
 year = {2020},
 booktitle = {Proceedings of ACL},
}
"""

_HOME_PAGE = "https://github.com/asahi417/m3"
_URL = f'https://huggingface.co/datasets/asahi417/{_NAME}/raw/main/dataset'
_DATA_TYPE = ["chemprot", "citation_intent", "hyperpartisan_news", "rct_sample", "sciie", "amcd",
              "yelp_review", "tweet_eval_irony", "tweet_eval_hate", "tweet_eval_emotion"]
_URLS = {
    k:
        {
            str(datasets.Split.TEST): [f'{_URL}/{k}/test.jsonl'],
            str(datasets.Split.TRAIN): [f'{_URL}/{k}/train.jsonl'],
            str(datasets.Split.VALIDATION): [f'{_URL}/{k}/dev.jsonl']
        }
    for k in _DATA_TYPE
}
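# For example, _URLS["chemprot"] resolves to
#   {"test": [f"{_URL}/chemprot/test.jsonl"],
#    "train": [f"{_URL}/chemprot/train.jsonl"],
#    "validation": [f"{_URL}/chemprot/dev.jsonl"]}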
_LABELS = {
    "chemprot": {"ACTIVATOR": 0, "AGONIST": 1, "AGONIST-ACTIVATOR": 2, "AGONIST-INHIBITOR": 3, "ANTAGONIST": 4, "DOWNREGULATOR": 5, "INDIRECT-DOWNREGULATOR": 6, "INDIRECT-UPREGULATOR": 7, "INHIBITOR": 8, "PRODUCT-OF": 9, "SUBSTRATE": 10, "SUBSTRATE_PRODUCT-OF": 11, "UPREGULATOR": 12},
    "citation_intent": {"Background": 0, "CompareOrContrast": 1, "Extends": 2, "Future": 3, "Motivation": 4, "Uses": 5},
    "hyperpartisan_news": {"false": 0, "true": 1},
    "rct_sample": {"BACKGROUND": 0, "CONCLUSIONS": 1, "METHODS": 2, "OBJECTIVE": 3, "RESULTS": 4},
    "sciie": {"COMPARE": 0, "CONJUNCTION": 1, "EVALUATE-FOR": 2, "FEATURE-OF": 3, "HYPONYM-OF": 4, "PART-OF": 5, "USED-FOR": 6},
    "amcd": {"false": 0, "true": 1},
    "yelp_review": {"5 star": 4, "4 star": 3, "3 star": 2, "2 star": 1, "1 star": 0},
    "tweet_eval_irony": {"non_irony":0, "irony": 1},
    "tweet_eval_hate": {"non_hate": 0, "hate": 1},
    "tweet_eval_emotion": {"anger": 0, "joy": 1, "optimism": 2, "sadness": 3}
}


class MultiDomainDocumentClassificationConfig(datasets.BuilderConfig):
    """BuilderConfig for MultiDomainDocumentClassification."""

    def __init__(self, **kwargs):
        """BuilderConfig for a single classification domain.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class MultiDomainDocumentClassification(datasets.GeneratorBasedBuilder):
    """Multi-domain document classification dataset builder; one config per domain in _DATA_TYPE."""

    BUILDER_CONFIGS = [
        MultiDomainDocumentClassificationConfig(
            name=k, version=datasets.Version(_VERSION), description=_DESCRIPTION
        ) for k in _DATA_TYPE
    ]

    def _split_generators(self, dl_manager):
        downloaded_file = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
            for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
        ]

    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                # Each non-empty line of the JSONL file is one example with "text" and "label" fields.
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    yield _key, json.loads(line)
                    _key += 1

    def _info(self):
        # Order the label names by their integer id so that ClassLabel ids match the stored labels.
        label2id = sorted(_LABELS[self.config.name].items(), key=lambda x: x[1])
        label = [i[0] for i in label2id]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(names=label),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
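

# A minimal, illustrative usage sketch, not part of the loading script itself:
# assuming this script is hosted on the Hub under
# "asahi417/multi_domain_document_classification" (see _URL above), each domain
# name in _DATA_TYPE is exposed as a named config.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("asahi417/multi_domain_document_classification", "citation_intent")
    print(dataset)                                   # DatasetDict with train/validation/test splits
    print(dataset["train"][0])                       # {"text": "...", "label": <int>}
    print(dataset["train"].features["label"].names)  # label names ordered by integer id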