Datasets:

Modalities:
Tabular
Text
Languages:
English
Libraries:
Datasets
License:

Fix streaming TAR archive

#2
by albertvillanova (HF staff) — opened
Files changed (1) hide show
  1. scicite.py +56 -52
scicite.py CHANGED
@@ -29,7 +29,6 @@ TODO: Use standard BigBio missing values.
29
  """
30
 
31
  import json
32
- import os
33
  from typing import Dict, List, Tuple
34
 
35
  import datasets
@@ -147,84 +146,89 @@ class SciciteDataset(datasets.GeneratorBasedBuilder):
147
  def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
148
  """Returns SplitGenerators."""
149
  urls = _URLS[_DATASETNAME]
150
- data_dir = dl_manager.download_and_extract(urls)
151
 
152
  return [
153
  datasets.SplitGenerator(
154
  name=datasets.Split.TRAIN,
155
  gen_kwargs={
156
- "filepath": os.path.join(data_dir, "scicite", "train.jsonl"),
 
157
  "split": "train",
158
  },
159
  ),
160
  datasets.SplitGenerator(
161
  name=datasets.Split.TEST,
162
  gen_kwargs={
163
- "filepath": os.path.join(data_dir, "scicite", "test.jsonl"),
 
164
  "split": "test",
165
  },
166
  ),
167
  datasets.SplitGenerator(
168
  name=datasets.Split.VALIDATION,
169
  gen_kwargs={
170
- "filepath": os.path.join(data_dir, "scicite", "dev.jsonl"),
 
171
  "split": "dev",
172
  },
173
  ),
174
  ]
175
 
176
- def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
177
  """Yields examples as (key, example) tuples."""
178
 
179
- with open(filepath, "r") as data_file:
180
- examples = [json.loads(line) for line in data_file]
 
 
181
 
182
- # Preprocesses examples
183
- keys = set()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  for example in examples:
185
- # Fixes duplicate keys
186
- if example["unique_id"] in keys:
187
- example["unique_id"] = example["unique_id"] + "_duplicate"
188
  else:
189
- keys.add(example["unique_id"])
190
-
191
- if self.config.schema == "source":
192
- for example in examples:
193
- yield str(example["unique_id"]), {
194
- "string": example["string"],
195
- "label": str(example["label"]),
196
- "sectionName": str(example["sectionName"]),
197
- "citingPaperId": str(example["citingPaperId"]),
198
- "citedPaperId": str(example["citedPaperId"]),
199
- "excerpt_index": int(example["excerpt_index"]),
200
- "isKeyCitation": bool(example["isKeyCitation"]),
201
- "label2": str(example.get("label2", "none")),
202
- "citeEnd": _safe_int(example["citeEnd"]),
203
- "citeStart": _safe_int(example["citeStart"]),
204
- "source": str(example["source"]),
205
- "label_confidence": float(
206
- example.get("label_confidence", np.nan)
207
- ),
208
- "label2_confidence": float(
209
- example.get("label2_confidence", np.nan)
210
- ),
211
- "id": str(example["id"]),
212
- "unique_id": str(example["unique_id"]),
213
- }
214
-
215
- elif self.config.schema == "bigbio_text":
216
- for example in examples:
217
- if "label2" in example:
218
- labels = [example["label"], example["label2"]]
219
- else:
220
- labels = [example["label"]]
221
-
222
- yield str(example["unique_id"]), {
223
- "id": example["unique_id"],
224
- "document_id": example["citingPaperId"],
225
- "text": example["string"],
226
- "labels": labels,
227
- }
228
 
229
 
230
  def _safe_int(a):
 
29
  """
30
 
31
  import json
 
32
  from typing import Dict, List, Tuple
33
 
34
  import datasets
 
146
  def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
147
  """Returns SplitGenerators."""
148
  urls = _URLS[_DATASETNAME]
149
+ data_dir = dl_manager.download(urls)
150
 
151
  return [
152
  datasets.SplitGenerator(
153
  name=datasets.Split.TRAIN,
154
  gen_kwargs={
155
+ "archive": dl_manager.iter_archive(data_dir),
156
+ "filepath": "scicite/train.jsonl",
157
  "split": "train",
158
  },
159
  ),
160
  datasets.SplitGenerator(
161
  name=datasets.Split.TEST,
162
  gen_kwargs={
163
+ "archive": dl_manager.iter_archive(data_dir),
164
+ "filepath": "scicite/test.jsonl",
165
  "split": "test",
166
  },
167
  ),
168
  datasets.SplitGenerator(
169
  name=datasets.Split.VALIDATION,
170
  gen_kwargs={
171
+ "archive": dl_manager.iter_archive(data_dir),
172
+ "filepath": "scicite/dev.jsonl",
173
  "split": "dev",
174
  },
175
  ),
176
  ]
177
 
178
+ def _generate_examples(self, archive, filepath, split: str) -> Tuple[int, Dict]:
179
  """Yields examples as (key, example) tuples."""
180
 
181
+ for path, file in archive:
182
+ if path == filepath:
183
+ examples = [json.loads(line) for line in file]
184
+ break
185
 
186
+ # Preprocesses examples
187
+ keys = set()
188
+ for example in examples:
189
+ # Fixes duplicate keys
190
+ if example["unique_id"] in keys:
191
+ example["unique_id"] = example["unique_id"] + "_duplicate"
192
+ else:
193
+ keys.add(example["unique_id"])
194
+
195
+ if self.config.schema == "source":
196
+ for example in examples:
197
+ yield str(example["unique_id"]), {
198
+ "string": example["string"],
199
+ "label": str(example["label"]),
200
+ "sectionName": str(example["sectionName"]),
201
+ "citingPaperId": str(example["citingPaperId"]),
202
+ "citedPaperId": str(example["citedPaperId"]),
203
+ "excerpt_index": int(example["excerpt_index"]),
204
+ "isKeyCitation": bool(example["isKeyCitation"]),
205
+ "label2": str(example.get("label2", "none")),
206
+ "citeEnd": _safe_int(example["citeEnd"]),
207
+ "citeStart": _safe_int(example["citeStart"]),
208
+ "source": str(example["source"]),
209
+ "label_confidence": float(
210
+ example.get("label_confidence", np.nan)
211
+ ),
212
+ "label2_confidence": float(
213
+ example.get("label2_confidence", np.nan)
214
+ ),
215
+ "id": str(example["id"]),
216
+ "unique_id": str(example["unique_id"]),
217
+ }
218
+
219
+ elif self.config.schema == "bigbio_text":
220
  for example in examples:
221
+ if "label2" in example:
222
+ labels = [example["label"], example["label2"]]
 
223
  else:
224
+ labels = [example["label"]]
225
+
226
+ yield str(example["unique_id"]), {
227
+ "id": example["unique_id"],
228
+ "document_id": example["citingPaperId"],
229
+ "text": example["string"],
230
+ "labels": labels,
231
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
232
 
233
 
234
  def _safe_int(a):