cdminix committed
Commit 4ca1449 · 1 Parent(s): f334f8a

fixed train and dev speaker overlap

Files changed (1):
  1. libritts-r-aligned.py (+28 -12)
libritts-r-aligned.py CHANGED
@@ -26,7 +26,7 @@ _PATH = os.environ.get("LIBRITTS_PATH", os.environ.get("HF_DATASETS_CACHE", None
 if _PATH is not None and not os.path.exists(_PATH):
     os.makedirs(_PATH)
 
-_VERSION = "1.0.1"
+_VERSION = "1.1.0"
 
 _CITATION = """\
 @article{koizumi2023libritts,
@@ -133,12 +133,11 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
         data_train = self._create_data([ds_dict["train-clean-100"], ds_dict["train-clean-360"], ds_dict["train-other-500"]])
         data_dev = self._create_data([ds_dict["dev-clean"], ds_dict["dev-other"]])
         data_test = self._create_data([ds_dict["test-clean"], ds_dict["test-other"]])
-        data_all = pd.concat([data_train, data_dev, data_test])
         splits += [
             datasets.SplitGenerator(
                 name="train.all",
                 gen_kwargs={
-                    "ds": data_all,
+                    "ds": data_train,
                 }
             ),
             datasets.SplitGenerator(
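
For context, a datasets.GeneratorBasedBuilder forwards each SplitGenerator's gen_kwargs to _generate_examples, so with this change the train.all split is generated from data_train alone rather than from the concatenated frame. A minimal sketch of that pattern; the toy builder and its data are illustrative only, not from this repo:

# Sketch: how gen_kwargs reaches the example generator in a GeneratorBasedBuilder.
import datasets
import pandas as pd

class ToyBuilder(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"speaker": datasets.Value("string")})
        )

    def _split_generators(self, dl_manager):
        data_train = pd.DataFrame({"speaker": ["19", "26", "32"]})
        return [
            # whatever frame is placed under "ds" is what this split iterates over
            datasets.SplitGenerator(name="train.all", gen_kwargs={"ds": data_train}),
        ]

    def _generate_examples(self, ds):
        # receives the same DataFrame that was put into gen_kwargs above
        for idx, row in ds.iterrows():
            yield idx, {"speaker": row["speaker"]}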
@@ -154,13 +153,30 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
                 }
             ),
         ]
-        # move last row for each speaker from data_all to dev dataframe
-        data_dev = data_all.copy()
-        data_dev = data_dev.sort_values(by=["speaker", "audio"])
-        data_dev = data_dev.groupby("speaker").tail(1)
-        data_dev = data_dev.reset_index()
-        # remove last row for each speaker from data_all
-        data_all = data_all[~data_all["audio"].isin(data_dev["audio"])]
+        data_all = pd.concat([data_train, data_dev, data_test])
+        # create a new split which takes one sample from each speaker in data_all and puts it into the dev split
+        # we then remove these samples from data_all
+        speakers = data_all["speaker"].unique()
+        # seed for reproducibility
+        np.random.seed(42)
+        data_dev_all = None
+        for speaker in tqdm(speakers, desc="creating dev split"):
+            data_speaker = data_all[data_all["speaker"] == speaker]
+            if len(data_speaker) < 10:
+                print(f"Speaker {speaker} has only {len(data_speaker)} samples, skipping")
+            else:
+                data_speaker = data_speaker.sample(1)
+                data_all = data_all[data_all["audio"] != data_speaker["audio"].values[0]]
+                if data_dev_all is None:
+                    data_dev_all = data_speaker
+                else:
+                    data_dev_all = pd.concat([data_dev_all, data_speaker])
+        data_all = data_all[data_all["speaker"].isin(data_dev_all["speaker"].unique())]
+        self.speaker2idxs = {}
+        self.speaker2idxs["all"] = {speaker: idx for idx, speaker in enumerate(sorted(list(data_dev_all["speaker"].unique())))}
+        self.speaker2idxs["train"] = {speaker: idx for idx, speaker in enumerate(sorted(list(data_train["speaker"].unique())))}
+        self.speaker2idxs["dev"] = {speaker: idx for idx, speaker in enumerate(sorted(list(data_dev["speaker"].unique())))}
+        self.speaker2idxs["test"] = {speaker: idx for idx, speaker in enumerate(sorted(list(data_test["speaker"].unique())))}
         splits += [
             datasets.SplitGenerator(
                 name="train",
@@ -171,7 +187,7 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name="dev",
                 gen_kwargs={
-                    "ds": data_dev,
+                    "ds": data_dev_all,
                 }
             ),
         ]
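
Matching the commit message, a quick way to confirm that two split frames (for instance the frame kept for training and data_dev_all) no longer share utterances is a set comparison on the audio column. The helper below is a sketch, not part of the loader:

import pandas as pd

def report_overlap(train_df: pd.DataFrame, dev_df: pd.DataFrame) -> None:
    """Count utterances and speakers shared by two split frames."""
    shared_audio = set(train_df["audio"]) & set(dev_df["audio"])
    shared_speakers = set(train_df["speaker"]) & set(dev_df["speaker"])
    print(f"shared utterances: {len(shared_audio)} (expected 0)")
    print(f"shared speakers: {len(shared_speakers)}")
    assert not shared_audio, "utterance leakage between train and dev"

Speaker overlap between the two frames is expected by construction, since each dev utterance is held out from a speaker whose remaining utterances stay behind; the leakage check that matters is at the utterance level.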
@@ -234,6 +250,7 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
             entries += add_entries
         if self.empty_textgrids > 0:
             logger.warning(f"Found {self.empty_textgrids} empty textgrids")
+        del self.ds, self.phone_cache, self.phone_converter
         return pd.DataFrame(
             entries,
             columns=[
@@ -247,7 +264,6 @@ class LibriTTSAlign(datasets.GeneratorBasedBuilder):
                 "basename",
             ],
         )
-        del self.ds, self.phone_cache, self.phone_converter
 
     def _create_entry(self, dsi_idx):
         dsi, idx = dsi_idx
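
The last two hunks relocate the del self.ds, self.phone_cache, self.phone_converter cleanup from after the return statement, where it could never execute, to just before it. A toy illustration of the underlying point, unrelated to the repo's actual code:

import pandas as pd

def build_frame(entries):
    return pd.DataFrame(entries)
    # unreachable: statements after a return never run,
    # so any cleanup has to happen before it
    del entries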
 