parquet-converter committed on
Commit
e820a2b
·
1 Parent(s): 8bea8b3

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,51 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ftz filter=lfs diff=lfs merge=lfs -text
6
- *.gz filter=lfs diff=lfs merge=lfs -text
7
- *.h5 filter=lfs diff=lfs merge=lfs -text
8
- *.joblib filter=lfs diff=lfs merge=lfs -text
9
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
- *.lz4 filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.npy filter=lfs diff=lfs merge=lfs -text
14
- *.npz filter=lfs diff=lfs merge=lfs -text
15
- *.onnx filter=lfs diff=lfs merge=lfs -text
16
- *.ot filter=lfs diff=lfs merge=lfs -text
17
- *.parquet filter=lfs diff=lfs merge=lfs -text
18
- *.pb filter=lfs diff=lfs merge=lfs -text
19
- *.pickle filter=lfs diff=lfs merge=lfs -text
20
- *.pkl filter=lfs diff=lfs merge=lfs -text
21
- *.pt filter=lfs diff=lfs merge=lfs -text
22
- *.pth filter=lfs diff=lfs merge=lfs -text
23
- *.rar filter=lfs diff=lfs merge=lfs -text
24
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
25
- *.tar.* filter=lfs diff=lfs merge=lfs -text
26
- *.tflite filter=lfs diff=lfs merge=lfs -text
27
- *.tgz filter=lfs diff=lfs merge=lfs -text
28
- *.wasm filter=lfs diff=lfs merge=lfs -text
29
- *.xz filter=lfs diff=lfs merge=lfs -text
30
- *.zip filter=lfs diff=lfs merge=lfs -text
31
- *.zstandard filter=lfs diff=lfs merge=lfs -text
32
- *tfevents* filter=lfs diff=lfs merge=lfs -text
33
- # Audio files - uncompressed
34
- *.pcm filter=lfs diff=lfs merge=lfs -text
35
- *.sam filter=lfs diff=lfs merge=lfs -text
36
- *.raw filter=lfs diff=lfs merge=lfs -text
37
- # Audio files - compressed
38
- *.aac filter=lfs diff=lfs merge=lfs -text
39
- *.flac filter=lfs diff=lfs merge=lfs -text
40
- *.mp3 filter=lfs diff=lfs merge=lfs -text
41
- *.ogg filter=lfs diff=lfs merge=lfs -text
42
- *.wav filter=lfs diff=lfs merge=lfs -text
43
- # Image files - uncompressed
44
- *.bmp filter=lfs diff=lfs merge=lfs -text
45
- *.gif filter=lfs diff=lfs merge=lfs -text
46
- *.png filter=lfs diff=lfs merge=lfs -text
47
- *.tiff filter=lfs diff=lfs merge=lfs -text
48
- # Image files - compressed
49
- *.jpg filter=lfs diff=lfs merge=lfs -text
50
- *.jpeg filter=lfs diff=lfs merge=lfs -text
51
- *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0-shot/ludwig-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79204e73708ff3e2b0525d439be51d14d4ac8e2688b096fe17d9f95b8283c439
3
+ size 39999
0-shot/ludwig-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1da1d6a6b4dd7815e8552630b8e479e6288288ee0c11a569945ff73300e1080
3
+ size 12602
1-shot/ludwig-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a57c49c3a20442b8f6ad9200a490c6ece49fc04ca200ceb07b247e8208b4b39
3
+ size 48226
1-shot/ludwig-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7ae29d0701172d76dd960e1cb972415b0ad1538fb30c806042ed5ae22567fd8
3
+ size 17883
10-shot/ludwig-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f7d688510d8913eb4e153700938545591c511ef400adb057063ea59b3fe166a
3
+ size 59424
10-shot/ludwig-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4efef1b149fd3806038898121dc814b131bfda765af2b5975883f2b0f2ca2e7c
3
+ size 22180
15-shot/ludwig-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c5b909ff1b80150993dceee83a97fd91aa25ec7f832bd6f3a80bc8e6ecc392c
3
+ size 65622
15-shot/ludwig-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a65a513eb59bb2a48e773d228fa8e79947a294d0f1e542e7f1181398ad5d31e6
3
+ size 23432
30-shot/ludwig-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:60dcd2a44eb92d11c48143435924cf84cc1af60ee6108a0e46dc36634f74bf3a
3
+ size 83762
30-shot/ludwig-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:556f2d19d07e8c8e6a9d71c82d1f93fac99c87e6d543a689ee471ddf8c5a5105
3
+ size 26900
5-shot/ludwig-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1526e344d5a63749f95433af8c17d2fb410ba094501b2275e5cebcb5fcc0cf1
3
+ size 53297
5-shot/ludwig-validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:420c2f94e28972488b11081b6b3243e90606b62724e529200fc7cb652e0bf2cc
3
+ size 20898
README.md DELETED
@@ -1,224 +0,0 @@
1
- ---
2
- annotations_creators:
3
- - expert-generated
4
- language:
5
- - en
6
- language_creators:
7
- - expert-generated
8
- license:
9
- - cc-by-4.0
10
- multilinguality:
11
- - monolingual
12
- pretty_name: ludwig
13
- size_categories:
14
- - n<1K
15
- source_datasets:
16
- - original
17
- tags:
18
- - implicature
19
- - pragmatics
20
- - language
21
- - llm
22
- - conversation
23
- - dialogue
24
- task_categories:
25
- - text-generation
26
- - fill-mask
27
- task_ids:
28
- - language-modeling
29
- - masked-language-modeling
30
- ---
31
-
32
- # Dataset Card for LUDWIG
33
-
34
- ## Table of Contents
35
- - [Table of Contents](#table-of-contents)
36
- - [Dataset Description](#dataset-description)
37
- - [Dataset Summary](#dataset-summary)
38
- - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
39
- - [Languages](#languages)
40
- - [Dataset Structure](#dataset-structure)
41
- - [Data Instances](#data-instances)
42
- - [Data Fields](#data-fields)
43
- - [Data Splits](#data-splits)
44
- - [Dataset Creation](#dataset-creation)
45
- - [Curation Rationale](#curation-rationale)
46
- - [Source Data](#source-data)
47
- - [Annotations](#annotations)
48
- - [Personal and Sensitive Information](#personal-and-sensitive-information)
49
- - [Considerations for Using the Data](#considerations-for-using-the-data)
50
- - [Social Impact of Dataset](#social-impact-of-dataset)
51
- - [Discussion of Biases](#discussion-of-biases)
52
- - [Other Known Limitations](#other-known-limitations)
53
- - [Additional Information](#additional-information)
54
- - [Dataset Curators](#dataset-curators)
55
- - [Licensing Information](#licensing-information)
56
- - [Citation Information](#citation-information)
57
- - [Contributions](#contributions)
58
-
59
- ## Dataset Description
60
-
61
- - **Homepage:**
62
- - **Repository: https://github.com/ucl-dark/ludwig**
63
- - **Paper: TODO**
64
- - **Leaderboard: TODO**
65
- - **Point of Contact: Laura Ruis**
66
-
67
- ### Dataset Summary
68
-
69
- LUDWIG (**L**anguage **U**nderstanding **W**ith **I**mplied meanin**G**) is a dataset containing English conversational implicatures.
70
- Implicature is the act of meaning or implying one thing by saying something else.
71
- There are different types of implicatures, from simple ones like "Some guests came to the party"
72
- (implying not all guests came) to more complicated implicatures that depend on context like
73
- "A: Are you going to the party this Friday? B: There's a global pandemic.", implying no. Implicatures serve a wide range of
74
- goals in communication: efficiency, style, navigating social interactions, and more. We cannot fully
75
- understand utterances without understanding their implications.
76
- The implicatures in this dataset are conversational because they come in utterance-response tuples.
77
- Each tuple has an implicature associated with it,
78
- which is the implied meaning of the response. For example:
79
-
80
- Utterance: Are you going to the party this Friday?
81
- Response: There's a global pandemic.
82
- Implicature: No.
83
-
84
- This dataset can be used to evaluate language models on their pragmatic language understanding.
85
-
86
- ### Supported Tasks and Leaderboards
87
-
88
- - ```text-generation```: The dataset can be used to evaluate a model's ability to generate the correct next token, i.e. "yes" or "no", depending on the implicature. For example, if you pass the model an example wrapped in a template like "Esther asked 'Are you coming to the party this Friday' and Juan responded 'There's a global pandemic', which means" the correct completion would be "no". Success in this task can be determined by the ability to generate the correct answer or by the ability to give the right token a higher likelihood than the wrong token, e.g. p("no") > p("yes").
89
- - ```fill-mask```: The dataset can be used to evaluate a model's ability to fill the correct token, i.e. "yes" or "no", depending on the implicature. For example, if you pass the model an example wrapped in a template like "Esther asked 'Are you coming to the party this Friday' and Juan responded 'There's a global pandemic', which means [mask]" the correct mask-fill would be "no". Success in this task can be determined by the ability to fill the correct answer or by the ability to give the right token a higher likelihood than the wrong token, e.g. p("no") > p("yes").
90
-
91
- ### Languages
92
-
93
- English
94
-
95
- ## Dataset Structure
96
-
97
- ### Data Instances
98
-
99
- Find below an example of a 1-shot example instance (1-shot because there's 1 prompt example).
100
- ```
101
- {
102
- "id": 1,
103
- "utterance": "Are you going to the party this Friday?",
104
- "response": "There's a global pandemic.",
105
- "implicature": "No.",
106
- "incoherent_implicature": "Yes".
107
- "prompts": [
108
- {
109
- "utterance": "Was that hot?",
110
- "response": "The sun was scorching.",
111
- "implicature": "Yes.",
112
- "incoherent_implicature": "No.".
113
- }
114
- ]
115
- }
116
- ```
117
-
118
- ### Data Fields
119
-
120
- ```
121
- {
122
- "id": int, # unique identifier of data points
123
- "utterance": str, # the utterance in this example
124
- "response": str, # the response in this example
125
- "implicature": str, # the implied meaning of the response, e.g. 'yes'
126
- "incoherent_implicature": str, # the wrong implied meaning, e.g. 'no'
127
- "prompts": [ # optional: prompt examples from the validation set
128
- {
129
- "utterance": str,
130
- "response": str,
131
- "implicature": str,
132
- "incoherent_implicature": str,
133
- }
134
- ]
135
- }
136
- ```
137
-
138
- ### Data Splits
139
-
140
- **Validation**: 118 instances that can be used for finetuning or few-shot learning
141
- **Test**: 600 instances that can be used for evaluating models.
142
-
143
- NB: the splits weren't originally part of the paper that presents this dataset. The same goes for the k-shot prompts. Added
144
- by @LauraRuis.
145
-
146
- ## Dataset Creation
147
-
148
- ### Curation Rationale
149
-
150
- Pragmatic language understanding is a crucial aspect of human communication, and implicatures are the primary object of study in this field.
151
- We want computational models of language to understand all the speakers' implications.
152
-
153
- ### Source Data
154
-
155
- #### Initial Data Collection and Normalization
156
-
157
- "Conversational implicatures in English dialogue: Annotated dataset", Elizabeth Jasmi George and Radhika Mamidi 2020.
158
-
159
- [Link to paper](https://doi.org/10.1016/j.procs.2020.04.251)
160
-
161
- #### Who are the source language producers?
162
-
163
- These written representations of the utterances are collected manually by scraping and transcribing from relevant sources from August, 2019 to August, 2020. The source of dialogues in the data include TOEFL listening comprehension short conversations, movie dialogues from IMSDb and websites explaining idioms, similes, metaphors and hyperboles. The implicatures are annotated manually.
164
-
165
- ### Annotations
166
-
167
- #### Annotation process
168
-
169
- Manually annotated by dataset collectors.
170
-
171
- #### Who are the annotators?
172
-
173
- Authors of the original paper.
174
-
175
- ### Personal and Sensitive Information
176
-
177
- All the data is public and not sensitive.
178
-
179
- ## Considerations for Using the Data
180
-
181
- ### Social Impact of Dataset
182
-
183
- Any application that requires communicating with humans requires pragmatic language understanding.
184
-
185
- ### Discussion of Biases
186
-
187
- Implicatures can be biased to specific cultures. For example, whether the Pope is Catholic (a commonly used response implicature to indicate "yes") might not be common knowledge for everyone.
188
- Implicatures are also language-specific, the way people use pragmatic language depends on the language. This dataset only focuses on the English language.
189
-
190
- ### Other Known Limitations
191
-
192
- None yet.
193
-
194
- ## Additional Information
195
-
196
- ### Dataset Curators
197
-
198
- Elizabeth Jasmi George and Radhika Mamidi
199
-
200
- ### Licensing Information
201
-
202
- [license](https://creativecommons.org/licenses/by/4.0/)
203
-
204
- ### Citation Information
205
-
206
- ```
207
- @article{George:Mamidi:2020,
208
- author = {George, Elizabeth Jasmi and Mamidi, Radhika},
209
- doi = {10.1016/j.procs.2020.04.251},
210
- journal = {Procedia Computer Science},
211
- keywords = {},
212
- note = {https://doi.org/10.1016/j.procs.2020.04.251},
213
- number = {},
214
- pages = {2316-2323},
215
- title = {Conversational implicatures in English dialogue: Annotated dataset},
216
- url = {https://app.dimensions.ai/details/publication/pub.1128198497},
217
- volume = {171},
218
- year = {2020}
219
- }
220
- ```
221
-
222
- ### Contributions
223
-
224
- Thanks to [@LauraRuis](https://github.com/LauraRuis) for adding this dataset.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ludwig.py DELETED
@@ -1,269 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """LUDWIG, (Language Understanding With Implied meaninG). The conversational implicature dataset."""
18
-
19
-
20
- from typing import Dict, Union
21
- import numpy as np
22
- import copy
23
- import csv
24
- import os
25
-
26
- import datasets
27
-
28
-
29
# Module-level logger for the datasets library.
logger = datasets.logging.get_logger(__name__)


# Citation entry for the dataset (to be completed on publication).
_CITATION = """\
TBC
"""

# Human-readable dataset description (to be completed).
_DESCRIPTION = """\
TODO
"""

# The raw CSV files are fetched directly from the project's GitHub repository.
_URL = "https://raw.githubusercontent.com/ucl-dark/ludwig/main/"
_URLS = {
    "dev": _URL + "dev_conversational_implicatures.csv",
    "test": _URL + "test_conversational_implicatures.csv",
}
45
-
46
-
47
class LudwigConfig(datasets.BuilderConfig):
    """BuilderConfig for LUDWIG.

    Holds the few-shot parameters of a configuration:

    Attributes:
        k: number of prompt examples attached to each test instance.
        seed: seed for the NumPy RNG that samples the prompt examples.
        rng: the seeded RNG; restored to its initial state via reset_rng()
            so prompt sampling is reproducible across generation passes.
    """

    def __init__(self, k: int, seed: int, **kwargs):
        """BuilderConfig for LUDWIG.

        Args:
            k: how many prompt examples to draw per test example.
            seed: RNG seed controlling which prompt examples are drawn.
            **kwargs: keyword arguments forwarded to super.
        """
        super(LudwigConfig, self).__init__(**kwargs)
        self.k = k
        self.seed = seed
        self.rng = np.random.default_rng(seed)

    def __eq__(self, other):
        # Two configs are equivalent iff they would sample the same prompts.
        # Bug fix: comparing against a non-LudwigConfig used to raise
        # AttributeError; defer to the other operand instead.
        if not isinstance(other, LudwigConfig):
            return NotImplemented
        return self.k == other.k and self.seed == other.seed

    def __hash__(self):
        # Defining __eq__ alone would implicitly set __hash__ to None and
        # make configs unhashable; keep hashing consistent with equality.
        return hash((self.k, self.seed))

    def reset_rng(self):
        """Restore the RNG to its initial seeded state."""
        self.rng = np.random.default_rng(self.seed)
65
-
66
-
67
class Ludwig(datasets.GeneratorBasedBuilder):
    """LUDWIG: Conversational implicatures dataset.

    Each example is an utterance/response pair plus the implied meaning
    ("yes" or "no") of the response.  Every named config ("<k>-shot")
    additionally attaches k prompt examples sampled from the dev split.
    """

    # One config per prompt size; all share the same version, description
    # and seed, so build them in a comprehension instead of six literals.
    BUILDER_CONFIGS = [
        LudwigConfig(
            name="%d-shot" % k,
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
            k=k,
            seed=0,
        )
        for k in (0, 1, 5, 10, 15, 30)
    ]

    def _info(self):
        """Return the DatasetInfo describing features and metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "utterance": datasets.Value("string"),
                    "response": datasets.Value("string"),
                    "implicature": datasets.Value("string"),
                    "incoherent_implicature": datasets.Value("string"),
                    "prompts": datasets.features.Sequence(
                        {
                            "utterance": datasets.Value("string"),
                            "response": datasets.Value("string"),
                            "implicature": datasets.Value("string"),
                            "incoherent_implicature": datasets.Value("string"),
                        }
                    ),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="https://github.com/ucl-dark/ludwig",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the dev/test CSVs and declare the validation/test splits.

        Both splits receive the dev file path as well, because prompt
        examples are always sampled from the dev data.
        """
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files["dev"],
                    "dev_filepath": downloaded_files["dev"],
                    "k": self.config.k,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files["test"],
                    "dev_filepath": downloaded_files["dev"],
                    "k": self.config.k,
                },
            ),
        ]

    @staticmethod
    def _process_text(text):
        """Strip surrounding newlines from a raw CSV field."""
        return text.strip("\n")

    def _filter_examples(
        self, input_line: Dict[str, str],
    ) -> Union[None, Dict[str, str]]:
        """Normalize a CSV row, keeping only simple yes/no implicatures.

        :param input_line: a row read from a csv file with data
        :return: a dict with utterance/response/implicature, or None if the
            row is empty or its implicature is not a plain yes or no.
        """
        if not input_line:
            return None
        # The raw annotation may read e.g. "Yes." or "No, ..."; only the
        # first few characters are inspected to classify it.
        raw_implicature = input_line["Implicature"].lower()
        if "yes" in raw_implicature[:5]:
            implicature = "yes"
        elif "no" in raw_implicature[:4]:
            implicature = "no"
        else:
            # Drop anything that is not a binary implicature.
            return None
        return {
            "utterance": self._process_text(input_line["Context utterance"]),
            "response": self._process_text(input_line["Response utterance"]),
            "implicature": implicature,
        }

    def get_negative_binary_example(self, example):
        """Create a false example for a binary implicature example.

        :param example: a parsed example dict with an "implicature" key
        :return: the same dict as the input except for the implicature is
            negated (yes to no and vice-versa)
        :raises ValueError: if the implicature is neither "yes" nor "no"
        """
        if example["implicature"] == "yes":
            false_implicature = "no"
        elif example["implicature"] == "no":
            false_implicature = "yes"
        else:
            raise ValueError("Unknown implicature %s" % example["implicature"])
        false_example = copy.deepcopy(example)
        false_example["implicature"] = false_implicature
        return false_example

    def _parse_rows(self, reader):
        """Filter rows from a csv.DictReader and attach the negated implicature.

        Extracted helper: the original code duplicated this loop verbatim
        for the test and dev readers.
        """
        examples = []
        for row in reader:
            example = self._filter_examples(row)
            if example is not None:
                example = {
                    **example,
                    "incoherent_implicature": self.get_negative_binary_example(
                        example
                    )["implicature"],
                }
                examples.append(example)
        return examples

    def read_data_csv(
        self,
        test_input_data_path: str,
        dev_input_data_path: str,
    ):
        """Read the test and dev CSVs into parsed example lists.

        :return: dict with "test_data" and "dev_data" lists of examples.
        :raises AssertionError: if either input file is missing.
        """
        # Typo fix: the messages used to say "working direction".
        assert os.path.exists(
            test_input_data_path
        ), "No input data file found at: %s\n" "Current working directory: %s" % (
            test_input_data_path,
            os.getcwd(),
        )
        assert os.path.exists(
            dev_input_data_path
        ), "No dev input data file found at: %s\n" "Current working directory: %s" % (
            dev_input_data_path,
            os.getcwd(),
        )
        with open(test_input_data_path, newline="") as csvfile:
            with open(dev_input_data_path, newline="") as dev_csvfile:
                return {
                    "test_data": self._parse_rows(csv.DictReader(csvfile)),
                    "dev_data": self._parse_rows(csv.DictReader(dev_csvfile)),
                }

    def _get_prompt_examples(self, dev_data, k_shot=0):
        """Sample k_shot distinct prompt examples from the dev data.

        :param dev_data: list of examples to sample from
        :param k_shot: how many prompt examples to draw
        :return: a list of k_shot examples (empty when k_shot <= 0)
        """
        if k_shot <= 0:
            return []
        prompt_indices = self.config.rng.choice(
            range(len(dev_data)), k_shot, replace=False
        )
        return [dev_data[j] for j in prompt_indices]

    def _generate_examples(self, filepath, dev_filepath, k: int):
        """Yield (key, example) pairs in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        logger.info("k-shot examples from = %s", dev_filepath)
        all_data = self.read_data_csv(filepath, dev_filepath)
        # Reset the RNG so prompt sampling is identical on every pass.
        self.config.reset_rng()
        for i, example in enumerate(all_data["test_data"]):
            prompt_examples = self._get_prompt_examples(all_data["dev_data"], k)
            yield i, {
                **example,
                "prompts": prompt_examples,
                "id": i + 1,
            }