asahi417 committed on
Commit
d775ebe
·
1 Parent(s): febb4b1
Files changed (3) hide show
  1. .DS_Store +0 -0
  2. filter_audio.py +43 -44
  3. main.sh +62 -2
.DS_Store ADDED
Binary file (6.15 kB). View file
 
filter_audio.py CHANGED
@@ -4,20 +4,22 @@ from os.path import join as p_join
4
  from tqdm import tqdm
5
  from typing import Dict
6
  from glob import glob
 
7
 
8
- from soundfile import LibsndfileError
9
  from datasets import Audio
10
 
11
  # dataset config
12
- direction = os.getenv("DIRECTION", "enA-jaA")
13
- sides = {i: n for n, i in enumerate(sorted(direction.split("-")), 1)}
14
- sides_rev = {v: k for k, v in sides.items()}
15
  cache_dir_audio = p_join("download", "audio", direction)
16
  cache_dir_feature = p_join("download", "feature", direction)
17
- os.makedirs(cache_dir_audio, exist_ok=True)
18
  os.makedirs(cache_dir_feature, exist_ok=True)
 
 
19
  line_no_start = int(os.getenv("LINE_NO_START", 0))
20
- line_no_end = int(os.getenv("LINE_NO_END", 100000))
21
 
22
 
23
  def loader(feature: str) -> Dict:
@@ -25,46 +27,43 @@ def loader(feature: str) -> Dict:
25
  return json.load(f)
26
 
27
 
 
28
  files = {
29
  int(os.path.basename(i).replace(".json", "")): i for i in glob(p_join(cache_dir_feature, "*.json"))
30
  }
 
 
 
 
 
 
 
31
  audio_loader = Audio()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
-
34
- def delete_audio(target_audio_file):
35
- if os.path.exists(target_audio_file):
36
- os.remove(target_audio_file)
37
- line_no = os.path.basename(target_audio_file).split(".")[0]
38
- try:
39
- feature_file = files[int(line_no)]
40
- if os.path.exists(feature_file):
41
- os.remove(feature_file)
42
- except Exception as e:
43
- print(e)
44
-
45
-
46
- # remove broken audio files
47
- n_broken_files = 0
48
- for i in tqdm(list(range(line_no_start, line_no_end))):
49
- if i not in files:
50
- continue
51
- i = loader(files[i])
52
- for lang_side in [sides_rev[1], sides_rev[2]]:
53
- if f"{lang_side}.path" not in i:
54
- continue
55
- audio_file = i[f"{lang_side}.path"]
56
- start, end = i[f"{lang_side}.duration_start"], i[f"{lang_side}.duration_end"]
57
- if os.path.exists(audio_file):
58
- try:
59
- wav = audio_loader.decode_example({"path": audio_file, "bytes": None})
60
- if start < end < len(wav["array"]):
61
- pass
62
- else:
63
- delete_audio(audio_file)
64
- n_broken_files += 1
65
- print(n_broken_files)
66
- except Exception as e:
67
- print(e)
68
- delete_audio(audio_file)
69
- n_broken_files += 1
70
- print(n_broken_files)
 
4
  from tqdm import tqdm
5
  from typing import Dict
6
  from glob import glob
7
+ import soundfile as sf
8
 
 
9
  from datasets import Audio
10
 
11
  # dataset config
12
+ direction_speech = os.getenv("DIRECTION_SPEECH", "enA")
13
+ direction_text = os.getenv("DIRECTION_TEXT", "jpn")
14
+ direction = f"{direction_speech}-{direction_text}"
15
  cache_dir_audio = p_join("download", "audio", direction)
16
  cache_dir_feature = p_join("download", "feature", direction)
17
+ cache_dir_audio_fixed = p_join("download", "audio_fixed", direction, direction_speech)
18
  os.makedirs(cache_dir_feature, exist_ok=True)
19
+ os.makedirs(cache_dir_audio, exist_ok=True)
20
+ os.makedirs(cache_dir_audio_fixed, exist_ok=True)
21
  line_no_start = int(os.getenv("LINE_NO_START", 0))
22
+ line_no_end = int(os.getenv("LINE_NO_END", 100))
23
 
24
 
25
  def loader(feature: str) -> Dict:
 
27
  return json.load(f)
28
 
29
 
30
+ # feature dictionary
31
  files = {
32
  int(os.path.basename(i).replace(".json", "")): i for i in glob(p_join(cache_dir_feature, "*.json"))
33
  }
34
+ # feature files
35
+ files_list = [k for k in files.keys() if line_no_start <= k <= line_no_end]
36
+ with open(f"text.{direction}.json") as f:
37
+ line2text = {int(k): v for k, v in json.load(f).items()}
38
+ text_list = [k for k in line2text.keys() if line_no_start <= k <= line_no_end]
39
+ fixed_audio_list = [int(os.path.basename(i).replace(".mp3", "")) for i in glob(p_join(cache_dir_audio_fixed, "*"))]
40
+ # remove broken audio files
41
  audio_loader = Audio()
42
+ index_list = [i for i in list(range(line_no_start, line_no_end)) if i in files_list and i in text_list and i not in fixed_audio_list]
43
+ print(f"filtering {len(index_list)} files....")
44
+ for i in tqdm(index_list):
45
+ features = loader(files[i])
46
+ audio_file = features[f"{direction_speech}.path"]
47
+ start, end = features[f"{direction_speech}.duration_start"], features[f"{direction_speech}.duration_end"]
48
+ if os.path.exists(audio_file):
49
+ try:
50
+ wav = audio_loader.decode_example({"path": audio_file, "bytes": None})
51
+ if start < end < len(wav["array"]):
52
+ sf.write(p_join(cache_dir_audio_fixed, f"{i}.mp3"), wav["array"][start:end], wav["sampling_rate"])
53
+ except Exception as e:
54
+ print(e)
55
+ os.remove(audio_file)
56
 
57
+ #
58
+ # print(f"features (filtered): {len(features)}")
59
+ # dataset_to_push = DatasetDict({"train": audio_dataset})
60
+ # repo_name = f"{hf_org}/{hf_dataset}"
61
+ # while True:
62
+ # try:
63
+ # dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
64
+ # break
65
+ # except Exception:
66
+ # print(f"FAILED: push_to_hub on {repo_name} failed. wait 60 sec and retry soon...")
67
+ # time.sleep(60)
68
+ # json.dump(data_dict["line_no"], f)
69
+ # f_writer.close()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
main.sh CHANGED
@@ -137,11 +137,71 @@ cd ../
137
 
138
  export DIRECTION="enA-jpn"
139
 
140
- export LINE_NO_START=0
141
  export LINE_NO_END=100
142
- export DATASET_ID="dummy"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
  python push_s2t_translation.py
144
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
 
146
  ########
147
  # NLLB #
 
137
 
138
  export DIRECTION="enA-jpn"
139
 
140
+ export LINE_NO_START=50
141
  export LINE_NO_END=100
142
+
143
+
144
+ export LINE_NO_START=0
145
+ export LINE_NO_END=25000
146
+ export DATASET_ID="1"
147
+ python push_s2t_translation.py
148
+
149
+ export LINE_NO_START=25000
150
+ export LINE_NO_END=50000
151
+ export DATASET_ID="2"
152
+ python push_s2t_translation.py
153
+
154
+ export LINE_NO_START=50000
155
+ export LINE_NO_END=75000
156
+ export DATASET_ID="3"
157
  python push_s2t_translation.py
158
 
159
+ export LINE_NO_START=75000
160
+ export LINE_NO_END=100000
161
+ export DATASET_ID="4"
162
+ python push_s2t_translation.py
163
+
164
+ export LINE_NO_START=100000
165
+ export LINE_NO_END=125000
166
+ export DATASET_ID="5"
167
+ python push_s2t_translation.py
168
+
169
+ export LINE_NO_START=125000
170
+ export LINE_NO_END=150000
171
+ export DATASET_ID="6"
172
+ python push_s2t_translation.py
173
+
174
+ export LINE_NO_START=150000
175
+ export LINE_NO_END=175000
176
+ export DATASET_ID="7"
177
+ python push_s2t_translation.py
178
+
179
+ export LINE_NO_START=175000
180
+ export LINE_NO_END=200000
181
+ export DATASET_ID="8"
182
+ python push_s2t_translation.py
183
+
184
+
185
+
186
+
187
+ export LINE_NO_START=100000
188
+ export LINE_NO_END=150000
189
+ export DATASET_ID="0"
190
+ python push_s2t_translation.py
191
+
192
+
193
+ export LINE_NO_START=150000
194
+ export LINE_NO_END=300000
195
+ export DATASET_ID="0"
196
+ python push_s2t_translation.py
197
+
198
+
199
+ export LINE_NO_START=300000
200
+ export LINE_NO_END=360000
201
+ export DATASET_ID="0"
202
+ python push_s2t_translation.py
203
+
204
+
205
 
206
  ########
207
  # NLLB #