Datasets: pmc
Languages: English
albertvillanova (HF staff) committed
Commit 1ac303e
1 Parent(s): b443dc5

Parallelize data download (#9)


- Add baseline ranges (1ff41f907be30dcb6bb3d7f43529651007469018)
- Refactor code with baseline ranges (7897dd5d9b0410681e252b2b11635e009b9397b0)
- Parallelize with zipped file_list and archive (de237a59246cf5f5e69a50c983f6dbc3c8a06691)
- Join baseline and incremental paths (cff04b2d5f500ef9d9a37a79c7aaeba13c14412d)
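
The gist of the change: instead of calling `dl_manager.download()` once per URL inside a loop (strictly sequential), the script now builds the full list of (file_list, archive) URL pairs up front and hands it to a single `download()` call, which lets the `datasets` library fetch the files in parallel. A minimal sketch of the pattern, with placeholder URLs rather than real PMC paths:

from datasets import DownloadManager

# Hypothetical (file_list, archive) URL pairs, mirroring the PR's zipped layout.
urls = [
    ("https://example.com/a.filelist.csv", "https://example.com/a.tar.gz"),
    ("https://example.com/b.filelist.csv", "https://example.com/b.tar.gz"),
]

dl_manager = DownloadManager()
# download() maps over nested structures (lists, tuples, dicts) and preserves
# their shape, so `paths` is again a list of (file_list_path, archive_path)
# tuples that can be unpacked together downstream.
paths = dl_manager.download(urls)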

Files changed (1)
  1. open_access.py +40 -89
open_access.py CHANGED
@@ -57,6 +57,11 @@ _SUBSETS = {
 }
 _BASELINE_DATE = "2022-12-17"
 _BASELINE_MAX_RANGE = 10
+_BASELINE_RANGES = {
+    "commercial": range(_BASELINE_MAX_RANGE),
+    "non_commercial": range(1, _BASELINE_MAX_RANGE),  # non-commercial PMC000xxxxxx baseline does not exist
+    "other": range(_BASELINE_MAX_RANGE),
+}
 
 
 class OpenAccessConfig(datasets.BuilderConfig):
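
For context, the new `_BASELINE_RANGES` mapping above encodes up front which baseline archives exist per subset, replacing the old try/except probing for the missing non-commercial PMC000xxxxxx baseline. A quick check of what each range expands to, with the constants copied from the diff:

_BASELINE_DATE = "2022-12-17"
_BASELINE_MAX_RANGE = 10
_BASELINE_RANGES = {
    "commercial": range(_BASELINE_MAX_RANGE),
    "non_commercial": range(1, _BASELINE_MAX_RANGE),  # PMC000xxxxxx missing
    "other": range(_BASELINE_MAX_RANGE),
}

for subset, indices in _BASELINE_RANGES.items():
    names = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in indices]
    print(subset, names[0], "...", names[-1])
# commercial PMC000xxxxxx.baseline.2022-12-17 ... PMC009xxxxxx.baseline.2022-12-17
# non_commercial PMC001xxxxxx.baseline.2022-12-17 ... PMC009xxxxxx.baseline.2022-12-17
# other PMC000xxxxxx.baseline.2022-12-17 ... PMC009xxxxxx.baseline.2022-12-17
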
@@ -71,7 +76,8 @@ class OpenAccessConfig(datasets.BuilderConfig):
         """
         subsets = [subsets] if isinstance(subsets, str) else subsets
         super().__init__(
-            name="+".join(subsets), **kwargs,
+            name="+".join(subsets),
+            **kwargs,
         )
         self.subsets = subsets if self.name != "all" else list(_SUBSETS.keys())
 
@@ -106,36 +112,16 @@ class OpenAccess(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
 
-        incremental_paths = {
-            "incremental_file_lists": [],
-            "incremental_archives": []
-        }
-        baseline_file_lists = []
-        baseline_archives = []
-
+        paths = []
         for subset in self.config.subsets:
             url = _URL.format(subset=_SUBSETS[subset])
             basename = f"{_SUBSETS[subset]}_txt."
             # Baselines
-            baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in range(_BASELINE_MAX_RANGE)]
-            # baseline_urls = {
-            #     "baseline_file_lists": [f"{url}{basename}{baseline}.filelist.csv" for baseline in baselines],
-            #     "baseline_archives": [f"{url}{basename}{baseline}.tar.gz" for baseline in baselines],
-            # }
-            # baseline_paths = dl_manager.download(baseline_urls)
-            for baseline in baselines:
-                baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
-                try:
-                    baseline_file_list = dl_manager.download(baseline_file_list_url)
-                except FileNotFoundError:  # non-commercial PMC000xxxxxx baseline does not exist
-                    continue
-                baseline_archive_url = f"{url}{basename}{baseline}.tar.gz"
-                try:
-                    baseline_archive = dl_manager.download(baseline_archive_url)
-                except FileNotFoundError:
-                    continue
-                baseline_file_lists.append(baseline_file_list)
-                baseline_archives.append(baseline_archive)
+            baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in _BASELINE_RANGES[subset]]
+            baseline_urls = [
+                (f"{url}{basename}{baseline}.filelist.csv", f"{url}{basename}{baseline}.tar.gz")
+                for baseline in baselines
+            ]
             # Incremental
             date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
             incremental_dates = [
@@ -143,75 +129,40 @@ class OpenAccess(datasets.GeneratorBasedBuilder):
                 for i in range(date_delta.days)
             ]
             incrementals = [f"incr.{date}" for date in incremental_dates]
-            incremental_urls = {
-                "incremental_file_lists": [
-                    f"{url}{basename}{incremental}.filelist.csv" for incremental in incrementals
-                ],
-                "incremental_archives": [f"{url}{basename}{incremental}.tar.gz" for incremental in incrementals],
-            }
-            paths = dl_manager.download(incremental_urls)
-            incremental_paths["incremental_file_lists"].extend(paths["incremental_file_lists"])
-            incremental_paths["incremental_archives"].extend(paths["incremental_archives"])
+            incremental_urls = [
+                (f"{url}{basename}{incremental}.filelist.csv", f"{url}{basename}{incremental}.tar.gz")
+                for incremental in incrementals
+            ]
+            paths += dl_manager.download(baseline_urls + incremental_urls)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "baseline_file_lists": baseline_file_lists,
-                    "baseline_archives": [dl_manager.iter_archive(archive) for archive in baseline_archives],
-                    "incremental_file_lists": incremental_paths["incremental_file_lists"],
-                    "incremental_archives": [
-                        dl_manager.iter_archive(archive) for archive in incremental_paths["incremental_archives"]
-                    ],
+                    "paths": [(file_list, dl_manager.iter_archive(archive)) for file_list, archive in paths],
                 },
             ),
         ]
 
-    def _generate_examples(self, baseline_file_lists, baseline_archives, incremental_file_lists, incremental_archives):
+    def _generate_examples(self, paths):
         key = 0
-        # Baselines
-        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
-            try:
-                baselines = pd.read_csv(baseline_file_list, index_col="Article File").to_dict(orient="index")
-                for path, file in baseline_archive:
-                    data = baselines.pop(path)
-                    content = file.read()
-                    try:
-                        text = content.decode("utf-8").strip()
-                    except UnicodeDecodeError as e:
-                        text = content.decode("latin-1").strip()
-                    data = {
-                        "text": text,
-                        "pmid": data["PMID"],
-                        "accession_id": data["AccessionID"],
-                        "license": data["License"],
-                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
-                        "retracted": data["Retracted"],
-                        "citation": data["Article Citation"],
-                    }
-                    yield key, data
-                    key += 1
-            except FileNotFoundError:  # non-commercial PMC000xxxxxx baseline does not exist
-                continue
-        # Incrementals
-        if incremental_file_lists:
-            for incremental_file_list, incremental_archive in zip(incremental_file_lists, incremental_archives):
-                incrementals = pd.read_csv(incremental_file_list, index_col="Article File").to_dict(orient="index")
-                for path, file in incremental_archive:
-                    data = incrementals.pop(path)
-                    content = file.read()
-                    try:
-                        text = content.decode("utf-8").strip()
-                    except UnicodeDecodeError as e:
-                        text = content.decode("latin-1").strip()
-                    data = {
-                        "text": text,
-                        "pmid": data["PMID"],
-                        "accession_id": data["AccessionID"],
-                        "license": data["License"],
-                        "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
-                        "retracted": data["Retracted"],
-                        "citation": data["Article Citation"],
-                    }
-                    yield key, data
-                    key += 1
+        for file_list, archive in paths:
+            file_list_data = pd.read_csv(file_list, index_col="Article File").to_dict(orient="index")
+            for path, file in archive:
+                data = file_list_data.pop(path)
+                content = file.read()
+                try:
+                    text = content.decode("utf-8").strip()
+                except UnicodeDecodeError as e:
+                    text = content.decode("latin-1").strip()
+                data = {
+                    "text": text,
+                    "pmid": data["PMID"],
+                    "accession_id": data["AccessionID"],
+                    "license": data["License"],
+                    "last_updated": data["LastUpdated (YYYY-MM-DD HH:MM:SS)"],
+                    "retracted": data["Retracted"],
+                    "citation": data["Article Citation"],
+                }
+                yield key, data
+                key += 1
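
For reference, a self-contained sketch of the join `_generate_examples` performs between a filelist CSV and the members of its archive. The column names are the real ones from the script; the CSV row, member path, and text bytes are made up for illustration:

import io

import pandas as pd

# Fake one-row filelist, using the script's column names.
csv_text = (
    "Article File,PMID,AccessionID,License,"
    "LastUpdated (YYYY-MM-DD HH:MM:SS),Retracted,Article Citation\n"
    "PMC001xxxxxx/PMC0012345.txt,12345,PMC0012345,CC BY,"
    "2022-12-17 00:00:00,no,Some citation\n"
)
file_list_data = pd.read_csv(io.StringIO(csv_text), index_col="Article File").to_dict(orient="index")

# dl_manager.iter_archive() yields (path, file-like) pairs; emulate one member
# whose bytes are Latin-1 and therefore not valid UTF-8.
archive = [("PMC001xxxxxx/PMC0012345.txt", io.BytesIO("Résumé of the article".encode("latin-1")))]

for path, file in archive:
    data = file_list_data.pop(path)  # metadata row keyed by the member's path
    content = file.read()
    try:
        text = content.decode("utf-8").strip()
    except UnicodeDecodeError:
        # Same fallback as the script: some PMC text files are Latin-1 encoded.
        text = content.decode("latin-1").strip()
    print({"text": text, "pmid": data["PMID"], "license": data["License"]})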