asahi417 committed on
Commit
febb4b1
·
1 Parent(s): f88663e
Files changed (1) hide show
  1. push_s2t_translation.py +5 -26
push_s2t_translation.py CHANGED
@@ -59,12 +59,14 @@ for i in tqdm(list(range(line_no_start, line_no_end))):
59
  continue
60
  i = loader(files[i])
61
  i[f"{direction_text}.text"] = line2text[str(i["line_no"])]
62
- audio_file = i[f"{direction_speech}.path"]
63
  start, end = i[f"{direction_speech}.duration_start"], i[f"{direction_speech}.duration_end"]
64
  if os.path.exists(audio_file):
65
  try:
66
  wav = audio_loader.decode_example({"path": audio_file, "bytes": None})
67
  if start < end < len(wav["array"]):
 
 
68
  features.append(i)
69
  else:
70
  delete_audio(audio_file)
@@ -74,34 +76,11 @@ for i in tqdm(list(range(line_no_start, line_no_end))):
74
 
75
 
76
  print(f"features (filtered): {len(features)}")
77
- data_dict = {f"{direction_speech}.audio": [i.pop(f"{direction_speech}.path") for i in features]}
78
  keys = features[0].keys()
79
- data_dict.update({k: [i[k] for i in features] for k in keys})
80
  audio_dataset = Dataset.from_dict(data_dict)
81
  audio_dataset = audio_dataset.cast_column(f"{direction_speech}.audio", Audio())
82
-
83
-
84
- # trim the audio according to the duration
85
- def clip_audio(batch):
86
- start = batch[f"{direction_speech}.duration_start"]
87
- end = batch[f"{direction_speech}.duration_end"]
88
- audio = batch[f"{direction_speech}.audio"]
89
- batch[f"{direction_speech}.audio"] = [
90
- {"array": a["array"][s:e], "sampling_rate": a["sampling_rate"]}
91
- for a, s, e in zip(audio, start, end)
92
- ]
93
- return batch
94
-
95
-
96
- audio_dataset_valid = audio_dataset_valid.map(
97
- function=clip_audio,
98
- batched=True,
99
- batch_size=128,
100
- num_proc=1,
101
- desc="clipping audio based on the duration:"
102
- )
103
-
104
- dataset_to_push = DatasetDict({"train": audio_dataset_valid})
105
  repo_name = f"{hf_org}/{hf_dataset}"
106
  while True:
107
  try:
 
59
  continue
60
  i = loader(files[i])
61
  i[f"{direction_text}.text"] = line2text[str(i["line_no"])]
62
+ audio_file = i.pop(f"{direction_speech}.path")
63
  start, end = i[f"{direction_speech}.duration_start"], i[f"{direction_speech}.duration_end"]
64
  if os.path.exists(audio_file):
65
  try:
66
  wav = audio_loader.decode_example({"path": audio_file, "bytes": None})
67
  if start < end < len(wav["array"]):
68
+ wav["array"] = wav["array"][start:end]
69
+ i[f"{direction_speech}.audio"] = wav
70
  features.append(i)
71
  else:
72
  delete_audio(audio_file)
 
76
 
77
 
78
  print(f"features (filtered): {len(features)}")
 
79
  keys = features[0].keys()
80
+ data_dict = {k: [i[k] for i in features] for k in keys}
81
  audio_dataset = Dataset.from_dict(data_dict)
82
  audio_dataset = audio_dataset.cast_column(f"{direction_speech}.audio", Audio())
83
+ dataset_to_push = DatasetDict({"train": audio_dataset})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  repo_name = f"{hf_org}/{hf_dataset}"
85
  while True:
86
  try: