new ui

- download_audio.py +4 -2
- filter_audio.py +0 -51
- main.sh +8 -4
- push_s2s_translation.py +0 -115
- push_s2t_translation.py +0 -94

download_audio.py
CHANGED
@@ -136,8 +136,10 @@ def process_dataset():
             if not flag:
                 print(f"failed:\n{g['url']}")
     else:
-
-
+        for i in range(0, len(inputs), n_pool):
+            batch_inputs = inputs[i: min(len(inputs), i + n_pool)]
+            with Pool(n_pool) as pool:
+                pool.map(get_audio, tqdm(batch_inputs, total=len(batch_inputs)))
 
 
 def loader(feature: str) -> Dict:

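The four added lines process the download queue in batches: each slice of at most n_pool inputs gets its own short-lived worker pool. A minimal, self-contained sketch of that pattern (get_audio, inputs, and n_pool are stand-ins here for names the script defines elsewhere):

from multiprocessing import Pool

from tqdm import tqdm

n_pool = 4  # assumed worker count


def get_audio(item):
    # placeholder for the real downloader in download_audio.py
    return item["line_no"]


if __name__ == "__main__":
    inputs = [{"line_no": i} for i in range(10)]
    for i in range(0, len(inputs), n_pool):
        # slice out at most n_pool items, then map them over a fresh pool
        batch_inputs = inputs[i: min(len(inputs), i + n_pool)]
        with Pool(n_pool) as pool:
            pool.map(get_audio, tqdm(batch_inputs, total=len(batch_inputs)))
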
filter_audio.py
DELETED
@@ -1,51 +0,0 @@
-import json
-import os
-from os.path import join as p_join
-from tqdm import tqdm
-from typing import Dict
-from glob import glob
-import soundfile as sf
-
-from datasets import Audio
-
-# dataset config
-direction = os.getenv("DIRECTION", "enA-jpn")
-direction_speech = os.getenv("DIRECTION_SPEECH", "enA")
-cache_dir_audio = p_join("download", "audio", direction)
-cache_dir_feature = p_join("download", "feature", direction)
-cache_dir_audio_fixed = p_join("download", "audio_fixed", direction, direction_speech)
-os.makedirs(cache_dir_feature, exist_ok=True)
-os.makedirs(cache_dir_audio, exist_ok=True)
-os.makedirs(cache_dir_audio_fixed, exist_ok=True)
-line_no_start = int(os.getenv("LINE_NO_START", 0))
-line_no_end = int(os.getenv("LINE_NO_END", 100))
-
-
-def loader(feature: str) -> Dict:
-    with open(feature) as f_reader:
-        return json.load(f_reader)
-
-
-# feature dictionary
-files = {
-    int(os.path.basename(i).replace(".json", "")): i for i in glob(p_join(cache_dir_feature, "*.json"))
-}
-# feature files
-files_list = [k for k in files.keys() if line_no_start <= k <= line_no_end]
-fixed_audio_list = [int(os.path.basename(i).replace(".mp3", "")) for i in glob(p_join(cache_dir_audio_fixed, "*"))]
-# remove broken audio files
-audio_loader = Audio()
-index_list = [i for i in list(range(line_no_start, line_no_end)) if i in files_list and i not in fixed_audio_list]
-print(f"filtering {len(index_list)} files....")
-for i in tqdm(index_list):
-    features = loader(files[i])
-    audio_file = features[f"{direction_speech}.path"]
-    start, end = features[f"{direction_speech}.duration_start"], features[f"{direction_speech}.duration_end"]
-    if os.path.exists(audio_file):
-        try:
-            wav = audio_loader.decode_example({"path": audio_file, "bytes": None})
-            if start < end < len(wav["array"]):
-                sf.write(p_join(cache_dir_audio_fixed, f"{i}.mp3"), wav["array"][start:end], wav["sampling_rate"])
-        except Exception as e:
-            print(e)
-            os.remove(audio_file)

main.sh
CHANGED
@@ -32,14 +32,20 @@ export LINE_NO_END=$((DATASET_ID * 5000))
 python download_audio.py
 
 
-export DATASET_ID=
+export DATASET_ID=11
 export DIRECTION="enA-jaA"
 export LINE_NO_START=$(((DATASET_ID-1) * 5000))
 export LINE_NO_END=$((DATASET_ID * 5000))
 python download_audio.py
 
+export DATASET_ID=13
+export DIRECTION="enA-jaA"
+export LINE_NO_START=$(((DATASET_ID-1) * 5000))
+export LINE_NO_END=$((DATASET_ID * 5000))
+python download_audio.py
 
-
+
+export DATASET_ID=15
 export DIRECTION="enA-jaA"
 export LINE_NO_START=$(((DATASET_ID-1) * 5000))
 export LINE_NO_END=$((DATASET_ID * 5000))
@@ -72,8 +78,6 @@ export LINE_NO_START=$(((DATASET_ID-1) * 5000))
 export LINE_NO_END=$((DATASET_ID * 5000))
 python download_audio.py
 
-
-
 export DATASET_ID=22
 export DIRECTION="enA-jaA"
 export LINE_NO_START=$(((DATASET_ID-1) * 5000))

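Each block in main.sh selects a 5000-line shard of the metadata: LINE_NO_START is (DATASET_ID-1)*5000 and LINE_NO_END is DATASET_ID*5000. A quick check of the ranges these IDs cover:

for dataset_id in (11, 13, 15, 22):
    start, end = (dataset_id - 1) * 5000, dataset_id * 5000
    print(dataset_id, start, end)  # e.g. 11 -> 50000 55000
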
push_s2s_translation.py
DELETED
@@ -1,115 +0,0 @@
-import json
-import os
-import time
-from os.path import join as p_join
-from tqdm import tqdm
-from typing import Dict
-from glob import glob
-
-from soundfile import LibsndfileError
-from datasets import Dataset, Audio, DatasetDict
-
-# dataset config
-direction = os.getenv("DIRECTION", "enA-jaA")
-sides = {i: n for n, i in enumerate(sorted(direction.split("-")), 1)}
-sides_rev = {v: k for k, v in sides.items()}
-cache_dir_audio = p_join("download", "audio", direction)
-cache_dir_feature = p_join("download", "feature", direction)
-os.makedirs(cache_dir_audio, exist_ok=True)
-os.makedirs(cache_dir_feature, exist_ok=True)
-line_no_start = int(os.getenv("LINE_NO_START", 0))
-line_no_end = int(os.getenv("LINE_NO_END", 10000))
-dataset_id = int(os.getenv("DATASET_ID", 0))
-hf_org = "kotoba-tech"
-hf_dataset = f"seamless-align-{direction}"
-
-
-def loader(feature: str) -> Dict:
-    with open(feature) as f:
-        return json.load(f)
-
-
-# create a dataset instance
-
-files = {
-    int(os.path.basename(i).replace(".json", "")): i for i in glob(p_join(cache_dir_feature, "*.json"))
-}
-file_ids = [i for i in range(line_no_start, line_no_end) if i in files]
-features = [loader(files[i]) for i in file_ids]
-print(f"features: {len(features)}")
-features = [i for i in features if os.path.exists(i[f"{sides_rev[1]}.path"]) and os.path.exists(i[f"{sides_rev[2]}.path"])]
-print(f"features (filtered): {len(features)}")
-data_dict = {
-    f"{sides_rev[1]}.audio": [i.pop(f"{sides_rev[1]}.path") for i in features],
-    f"{sides_rev[2]}.audio": [i.pop(f"{sides_rev[2]}.path") for i in features]
-}
-keys = features[0].keys()
-data_dict.update(
-    {k: [i[k] for i in features] for k in keys}
-)
-audio_dataset = Dataset.from_dict(data_dict)
-audio_dataset = audio_dataset.cast_column(f"{sides_rev[1]}.audio", Audio())
-audio_dataset = audio_dataset.cast_column(f"{sides_rev[2]}.audio", Audio())
-
-# remove instances with broken audio files
-broken_files = []
-for i in tqdm(range(len(audio_dataset))):
-    try:
-        a = audio_dataset[i]
-        flag = True
-        for side_id in sides_rev.keys():
-            start = a[f"{sides_rev[side_id]}.duration_start"]
-            end = a[f"{sides_rev[side_id]}.duration_end"]
-            array = a[f"{sides_rev[side_id]}.audio"]["array"]
-            flag = 0 < start < end < len(array)
-        if not flag:
-            broken_files.append(i)
-    except LibsndfileError:
-        broken_files.append(i)
-        continue
-print(f"features (removed broken audio): {len(audio_dataset) - len(broken_files)}")
-if len(broken_files) > 0:
-    print(f"found {len(broken_files)} broken files:")
-    flag = input("delete the broken files? (y/n): ")
-    if flag == "y":
-        # remove broken files
-        for i in broken_files:
-            if os.path.exists(files[file_ids[i]]):
-                os.remove(files[file_ids[i]])
-            for side_id in sides_rev.keys():
-                if os.path.exists(data_dict[f"{sides_rev[side_id]}.audio"][i]):
-                    os.remove(data_dict[f"{sides_rev[side_id]}.audio"][i])
-valid_data_id = [i for i in range(len(audio_dataset)) if i not in broken_files]
-audio_dataset_valid = audio_dataset.select(valid_data_id)
-
-
-# trim the audio according to the duration
-def clip_audio(batch):
-    for side_id in sides_rev.keys():
-        start = batch[f"{sides_rev[side_id]}.duration_start"]
-        end = batch[f"{sides_rev[side_id]}.duration_end"]
-        audio = batch[f"{sides_rev[side_id]}.audio"]
-        batch[f"{sides_rev[side_id]}.audio"] = [
-            {"array": a["array"][s:e], "sampling_rate": a["sampling_rate"]}
-            for a, s, e in zip(audio, start, end)
-        ]
-    return batch
-
-
-audio_dataset_valid = audio_dataset_valid.map(
-    function=clip_audio,
-    batched=True,
-    batch_size=128,
-    num_proc=1,
-    desc="clipping audio based on the duration:"
-)
-
-dataset_to_push = DatasetDict({"train": audio_dataset_valid})
-repo_name = f"{hf_org}/{hf_dataset}"
-while True:
-    try:
-        dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
-        break
-    except Exception:
-        print(f"FAILED: push_to_hub on {repo_name} failed. wait 60 sec and retry soon...")
-        time.sleep(60)

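The deleted s2s script keyed every column name off the direction string via sides/sides_rev. For the default direction "enA-jaA" the two dictionaries come out as below (a quick check reproducing the two lines from the script):

direction = "enA-jaA"
sides = {i: n for n, i in enumerate(sorted(direction.split("-")), 1)}
sides_rev = {v: k for k, v in sides.items()}
print(sides)      # {'enA': 1, 'jaA': 2}
print(sides_rev)  # {1: 'enA', 2: 'jaA'}
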
push_s2t_translation.py
DELETED
@@ -1,94 +0,0 @@
-import json
-import os
-import time
-from os.path import join as p_join
-from tqdm import tqdm
-from typing import Dict
-from glob import glob
-
-from soundfile import LibsndfileError
-from datasets import Dataset, Audio, DatasetDict
-
-# dataset config
-direction_speech = os.getenv("DIRECTION_SPEECH", "enA")
-direction_text = os.getenv("DIRECTION_TEXT", "jpn")
-direction = f"{direction_speech}-{direction_text}"
-with open(f"text.{direction}.json") as f:
-    line2text = json.load(f)
-cache_dir_audio = p_join("download", "audio", direction)
-cache_dir_feature = p_join("download", "feature", direction)
-os.makedirs(cache_dir_audio, exist_ok=True)
-os.makedirs(cache_dir_feature, exist_ok=True)
-line_no_start = int(os.getenv("LINE_NO_START", 0))
-line_no_end = int(os.getenv("LINE_NO_END", 10000))
-dataset_id = os.getenv("DATASET_ID", 0)
-hf_org = "kotoba-tech"
-hf_dataset = f"seamless-align-{direction}"
-
-
-def loader(feature: str) -> Dict:
-    with open(feature) as f:
-        return json.load(f)
-
-
-# create a dataset instance
-
-files = {
-    int(os.path.basename(i).replace(".json", "")): i for i in glob(p_join(cache_dir_feature, "*.json"))
-}
-
-
-def delete_audio(target_audio_file):
-    if os.path.exists(target_audio_file):
-        os.remove(target_audio_file)
-    line_no = os.path.basename(target_audio_file).split(".")[0]
-    try:
-        feature_file = files[int(line_no)]
-        if os.path.exists(feature_file):
-            os.remove(feature_file)
-    except Exception as e:
-        print(e)
-
-
-# remove broken audio files
-print("filtering....")
-features = []
-audio_loader = Audio()
-for i in tqdm(list(range(line_no_start, line_no_end))):
-    if i not in files:
-        continue
-    i = loader(files[i])
-    i[f"{direction_text}.text"] = line2text[str(i["line_no"])]
-    audio_file = i.pop(f"{direction_speech}.path")
-    start, end = i[f"{direction_speech}.duration_start"], i[f"{direction_speech}.duration_end"]
-    if os.path.exists(audio_file):
-        try:
-            wav = audio_loader.decode_example({"path": audio_file, "bytes": None})
-            if start < end < len(wav["array"]):
-                wav["array"] = wav["array"][start:end]
-                i[f"{direction_speech}.audio"] = wav
-                features.append(i)
-            else:
-                delete_audio(audio_file)
-        except Exception as e:
-            print(e)
-            delete_audio(audio_file)
-
-
-print(f"features (filtered): {len(features)}")
-keys = features[0].keys()
-data_dict = {k: [i[k] for i in features] for k in keys}
-audio_dataset = Dataset.from_dict(data_dict)
-audio_dataset = audio_dataset.cast_column(f"{direction_speech}.audio", Audio())
-dataset_to_push = DatasetDict({"train": audio_dataset})
-repo_name = f"{hf_org}/{hf_dataset}"
-while True:
-    try:
-        dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
-        break
-    except Exception:
-        print(f"FAILED: push_to_hub on {repo_name} failed. wait 60 sec and retry soon...")
-        time.sleep(60)
-os.makedirs("log", exist_ok=True)
-with open(f"log/pushed.line_no.{dataset_id}.json", "w") as f:
-    json.dump(data_dict["line_no"], f)