Lauler committed
Commit: 1aa1632 · Parent: 598981c

Simplify and remove BuilderConfigs

Files changed (1):
  1. rixvox.py  +13 -52
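
Note on the user-facing effect (a minimal sketch, not part of this commit): with the per-split BuilderConfigs removed, the script exposes a single default configuration (DEFAULT_CONFIG_NAME = "all" is kept, see the first hunk below), and individual splits are picked with load_dataset's split argument instead of a config name. The repository id "KBLab/rixvox" below is an assumption; it is not shown on this page.

    from datasets import load_dataset

    # Before this commit, a split was selected via a config name,
    # e.g. load_dataset("KBLab/rixvox", "train").
    # After this commit there is only the default config; use `split=` instead.
    train = load_dataset("KBLab/rixvox", split="train")
    everything = load_dataset("KBLab/rixvox")  # DatasetDict containing all splits
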
rixvox.py CHANGED
@@ -50,18 +50,6 @@ class Rixvox(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("1.0.0")
     DEFAULT_CONFIG_NAME = "all"
 
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="train", version=VERSION, description="Training set of the RixVox dataset. 5383 hours of speech."
-        ),
-        datasets.BuilderConfig(
-            name="dev", version=VERSION, description="Development set of the RixVox dataset. 52 hours of speech."
-        ),
-        datasets.BuilderConfig(
-            name="test", version=VERSION, description="Test set of the RixVox dataset. 59 hours of speech."
-        ),
-    ]
-
     def _info(self):
 
         features = datasets.Features(
@@ -104,20 +92,10 @@ class Rixvox(datasets.GeneratorBasedBuilder):
         splits = ["train", "dev", "test"]
         meta_urls = {split: [_META_URL.format(split=split)] for split in splits}
 
-        if self.config.name == "all":
-            archive_urls = {
-                split: [_DATA_URL.format(split=split, shard_idx=idx) for idx in range(0, _N_SHARDS[split])]
-                for split in splits
-            }
-        else:
-            archive_urls = {
-                self.config.name: [
-                    _DATA_URL.format(split=self.config.name, shard_idx=idx)
-                    for idx in range(0, _N_SHARDS[self.config.name])
-                ]
-            }
-            # Choose single split
-            meta_urls = {self.config.name: meta_urls[self.config.name]}
+        archive_urls = {
+            split: [_DATA_URL.format(split=split, shard_idx=idx) for idx in range(0, _N_SHARDS[split])]
+            for split in splits
+        }
 
         archive_paths = dl_manager.download(archive_urls)
         local_extracted_archives = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
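
The comprehension kept in this hunk builds one list of shard URLs per split. A small self-contained sketch of the resulting structure, using placeholder values for _DATA_URL and _N_SHARDS (their real definitions sit at the top of rixvox.py and are not part of this diff):

    # Placeholder template and shard counts, for illustration only.
    _DATA_URL = "data/{split}/{split}_{shard_idx:04d}.tar"
    _N_SHARDS = {"train": 3, "dev": 1, "test": 1}

    splits = ["train", "dev", "test"]
    archive_urls = {
        split: [_DATA_URL.format(split=split, shard_idx=idx) for idx in range(0, _N_SHARDS[split])]
        for split in splits
    }
    # => {"train": ["data/train/train_0000.tar", "data/train/train_0001.tar", "data/train/train_0002.tar"],
    #     "dev": ["data/dev/dev_0000.tar"],
    #     "test": ["data/test/test_0000.tar"]}
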
@@ -130,28 +108,14 @@ class Rixvox(datasets.GeneratorBasedBuilder):
             "test": datasets.Split.TEST,
         }
 
-        if self.config.name == "all":
-            for split in splits:
-                split_generators.append(
-                    datasets.SplitGenerator(
-                        name=split_names.get(split),
-                        gen_kwargs={
-                            "local_extracted_archive_paths": local_extracted_archives.get(split),
-                            "archive_iters": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
-                            "meta_paths": meta_paths[split],
-                        },
-                    ),
-                )
-        else:
+        for split in splits:
             split_generators.append(
                 datasets.SplitGenerator(
-                    name=split_names.get(self.config.name),
+                    name=split_names.get(split),
                     gen_kwargs={
-                        "local_extracted_archive_paths": local_extracted_archives.get(self.config.name),
-                        "archive_iters": [
-                            dl_manager.iter_archive(path) for path in archive_paths.get(self.config.name)
-                        ],
-                        "meta_paths": meta_paths, # meta_paths is a dict with one key, the split name
+                        "local_extracted_archive_paths": local_extracted_archives.get(split),
+                        "archive_iters": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
+                        "meta_paths": meta_paths[split],
                     },
                 ),
             )
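
Each SplitGenerator forwards its gen_kwargs to _generate_examples. As a rough, illustrative skeleton of how those arguments are typically consumed in an audio loading script (this is not the actual body in rixvox.py; dl_manager.iter_archive yields (path_inside_archive, file_object) pairs):

    def _generate_examples(self, local_extracted_archive_paths, archive_iters, meta_paths):
        # Skeleton only: the real method also joins in the parquet metadata
        # and fills out the full feature dictionary.
        key = 0
        for archive in archive_iters:
            for audio_filename, file in archive:
                yield key, {"audio": {"path": audio_filename, "bytes": file.read()}}
                key += 1
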
@@ -167,14 +131,11 @@ class Rixvox(datasets.GeneratorBasedBuilder):
     ):
         key = 0
 
-        if self.config.name == "all":
-            data = []
-            for meta_path in meta_paths.values():
-                data.append(pd.read_parquet(meta_path))
+        data = []
+        for meta_path in meta_paths:
+            data.append(pd.read_parquet(meta_path))
 
-            df_meta = pd.concat(data)
-        else:
-            df_meta = pd.read_parquet(meta_paths[self.config.name])
+        df_meta = pd.concat(data)
 
         df_meta = df_meta.set_index("filename", drop=False)
         # Column contains NAType, so we convert to object type column and NAType to None values.
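
The loop can drop .values() because gen_kwargs now passes meta_paths[split], i.e. a plain list of downloaded parquet files for one split, rather than a dict keyed by split name. A minimal standalone sketch of the metadata handling, with a hypothetical file path:

    import pandas as pd

    # Hypothetical local path; in the script this list comes from the
    # downloaded meta_urls for the split.
    meta_paths = ["metadata/train_metadata.parquet"]

    df_meta = pd.concat([pd.read_parquet(p) for p in meta_paths])
    df_meta = df_meta.set_index("filename", drop=False)  # look up rows by audio filename
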
 