LucasWeber commited on
Commit
87dce2c
·
verified ·
1 Parent(s): edc2bee

Create icl_consistency_test.py

Browse files
Files changed (1) hide show
  1. icl_consistency_test.py +125 -0
icl_consistency_test.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+
17
+ This 🤗 dataset provides data for the GenBench CBT task 'The ICL consistency test' (see https://github.com/GenBench/genbench_cbt/tree/main/src/genbench/tasks/icl_consistency_test).
18
+ The ICL consistency test measures the consistency of LLM predictions on the same data points across many different equivalent prompting setups.
19
+ The score in the associated metric (Cohen's kappa) can be understood as a measure of a model's prediction consistency in the face of task-irrelevant information.
20
+
21
+ For an easy evaluation of any 🤗 models, we refer to the code provided in the GenBench task. For in-depth information on the task, we refer to the associated
22
+ publications (Weber et al., 2023a, 2023b) and the respective GenBench doc.md (https://github.com/GenBench/genbench_cbt/blob/main/src/genbench/tasks/icl_consistency_test/doc.md).
23
+
24
+ Evaluation on the relevant metrics can be done via the example_evaluation.py script in the GenBench repository.
25
+
26
+ - Weber, L., Bruni, E., & Hupkes, D. (2023, December). Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning.
27
+ In Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL) (pp. 294-313).
28
+ - Weber, L., Bruni, E., & Hupkes, D. (2023). The ICL Consistency Test. arXiv preprint arXiv:2312.04945.
29
+
30
+ """
31
+
32
+ import json
33
+ import os
34
+
35
+ import datasets
36
+
37
# BibTeX entries for the two publications associated with this dataset:
# the CoNLL 2023 paper introducing the evaluation, and the arXiv report
# describing the GenBench ICL consistency test task itself.
_CITATION = """\
@inproceedings{weber2023mind,
title={Mind the instructions: a holistic evaluation of consistency and interactions in prompt-based learning},
author={Weber, Lucas and Bruni, Elia and Hupkes, Dieuwke},
booktitle={Proceedings of the 27th Conference on Computational Natural Language Learning (CoNLL)},
pages={294--313},
year={2023}
},
@article{weber2023icl,
title={The ICL Consistency Test},
author={Weber, Lucas and Bruni, Elia and Hupkes, Dieuwke},
journal={arXiv preprint arXiv:2312.04945},
year={2023}
}
"""
52
+
53
# Human-readable summary surfaced through datasets.DatasetInfo / the dataset card.
_DESCRIPTION = """\
In prompting, models are sensitive to task-irrelevant information in their prompt. This test can be used to quantify this sensitivity of any 🤗 model.
The ICL consistency test does this by measuring a model's prediction consistency across many different semantically equivalent prompting setups.
"""
57
+
58
+ _HOMEPAGE = "https://github.com/GenBench/genbench_cbt/blob/main/src/genbench/tasks/icl_consistency_test/doc.md"
59
+
60
+ _LICENSE = ""
61
+
62
+ _URL = "https://huggingface.co/datasets/LucasWeber/icl_consistency_test/blob/main/"
63
+ _URLS = {
64
+ "anli": _URL + "genbench_all_anli.jsonl",
65
+ "mnli": _URL + "genbench_all_glue%2Bmnli.jsonl",
66
+ }
67
+
68
+
69
class ICLConsistencyTest(datasets.GeneratorBasedBuilder):
    """Loader for the GenBench ICL consistency test data.

    In prompting, models are sensitive to task-irrelevant information in their prompt. This test can be used to quantify this sensitivity of any 🤗 model.
    The ICL consistency test does this by measuring a model's prediction consistency across many different semantically equivalent prompting setups.

    Two configs are available, one per source dataset: "anli" and "mnli".
    Each config exposes a single "test" split read from one JSONL file.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="anli",
            version=VERSION,
            description="This part of the ICL consistency test covers data points from ANLI",
        ),
        datasets.BuilderConfig(
            name="mnli",
            version=VERSION,
            description="This part of the ICL consistency test covers data points from MNLI",
        ),
    ]

    def _info(self):
        """Declare the feature schema and dataset-level metadata."""
        features = datasets.Features(
            {
                "input": datasets.Value("string"),          # prompt text fed to the model
                "target": datasets.Value("string"),         # gold label as text
                "target_numeric": datasets.Value("int32"),  # gold label as an integer
                # data_ID / setup_ID presumably identify the underlying data
                # point and the prompting setup, per the module docstring —
                # confirm against the GenBench task doc.
                "data_ID": datasets.Value("int32"),
                "setup_ID": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("input", "target"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the JSONL file for the selected config and declare the test split.

        Bug fix: ``download_and_extract`` is invoked with a single URL string,
        so it returns a single local path (a ``str``), not a dict.  The
        original code indexed that string with ``self.config.name``, which
        raises ``TypeError: string indices must be integers`` at load time.
        The path is now passed through directly.
        """
        filepath = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": filepath, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs, one per line of the JSONL file.

        ``split`` is unused (there is only a test split) but kept because the
        datasets library forwards every ``gen_kwargs`` entry to this method.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, {
                    "input": data["input"],
                    "target": data["target"],
                    "target_numeric": data["target_numeric"],
                    "data_ID": data["data_ID"],
                    "setup_ID": data["setup_ID"],
                }
125
+