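"""Build a HuggingFace audio dataset from cached seamless-align metadata and audio, then push it to the Hub.

Reads per-pair JSON metadata from download/feature/<direction>, attaches the cached
audio files, drops instances with missing or broken audio, clips each waveform to its
annotated duration, and pushes the result as a subset config of
kotoba-tech/seamless-align-<direction>.
"""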
import json
import os
import time
from os.path import join as p_join
from tqdm import tqdm
from typing import Dict
from glob import glob

from soundfile import LibsndfileError
from datasets import Dataset, Audio, DatasetDict

# dataset config
direction = os.getenv("DIRECTION", "enA-jaA")
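# map each side name to a stable index and back, e.g. for "enA-jaA":
# sides = {"enA": 1, "jaA": 2}, sides_rev = {1: "enA", 2: "jaA"}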
sides = {i: n for n, i in enumerate(sorted(direction.split("-")), 1)}
sides_rev = {v: k for k, v in sides.items()}
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_audio, exist_ok=True)
os.makedirs(cache_dir_feature, exist_ok=True)
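# only metadata files whose numeric IDs fall in [LINE_NO_START, LINE_NO_END) are
# processed, so the full corpus can be converted in independent chunks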
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 10000))
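# DATASET_ID selects the Hub config name (subset_<DATASET_ID>) this chunk is pushed to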
dataset_id = int(os.getenv("DATASET_ID", 0))
hf_org = "kotoba-tech"
hf_dataset = f"seamless-align-{direction}"


def loader(feature: str) -> Dict:
    """Load one per-pair metadata record from its JSON file."""
    with open(feature) as f:
        return json.load(f)


# create a dataset instance

files = {
    int(os.path.basename(i).replace(".json", "")): i for i in glob(p_join(cache_dir_feature, "*.json"))
}
file_ids = [i for i in range(line_no_start, line_no_end) if i in files]
features = [loader(files[i]) for i in file_ids]
print(f"features: {len(features)}")
# drop pairs whose audio files are missing; filter file_ids in lockstep so that
# features[i] still corresponds to files[file_ids[i]] (needed for deletion below)
kept = [
    (n, f) for n, f in zip(file_ids, features)
    if os.path.exists(f[f"{sides_rev[1]}.path"]) and os.path.exists(f[f"{sides_rev[2]}.path"])
]
file_ids = [n for n, _ in kept]
features = [f for _, f in kept]
print(f"features (filtered): {len(features)}")
assert features, "no valid features found in the requested range"
data_dict = {
    f"{sides_rev[1]}.audio": [i.pop(f"{sides_rev[1]}.path") for i in features],
    f"{sides_rev[2]}.audio": [i.pop(f"{sides_rev[2]}.path") for i in features]
}
keys = features[0].keys()
data_dict.update(
    {k: [i[k] for i in features] for k in keys}
)
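# the remaining metadata keys (e.g. the duration annotations) become regular columns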
audio_dataset = Dataset.from_dict(data_dict)
audio_dataset = audio_dataset.cast_column(f"{sides_rev[1]}.audio", Audio())
audio_dataset = audio_dataset.cast_column(f"{sides_rev[2]}.audio", Audio())
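# Audio() decodes the underlying file only when a row is accessed, so unreadable
# files surface as LibsndfileError in the validation loop below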

# remove instances with broken audio files
broken_files = []
for i in tqdm(range(len(audio_dataset))):
    try:
        a = audio_dataset[i]
        flag = True
        for side_id in sides_rev.keys():
            start = a[f"{sides_rev[side_id]}.duration_start"]
            end = a[f"{sides_rev[side_id]}.duration_end"]
            array = a[f"{sides_rev[side_id]}.audio"]["array"]
            # both sides must have a valid clip range inside the decoded waveform
            flag = flag and 0 < start < end < len(array)
        if not flag:
            broken_files.append(i)
    except LibsndfileError:
        # decoding failed outright
        broken_files.append(i)
print(f"features (removed broken audio): {len(audio_dataset) - len(broken_files)}")
if len(broken_files) > 0:
    print(f"found {len(broken_files)} broken files:")
    flag = input("delete the broken files? (y/n): ")
    if flag == "y":
        # remove broken files
        for i in broken_files:
            if os.path.exists(files[file_ids[i]]):
                os.remove(files[file_ids[i]])
            for side_id in sides_rev.keys():
                if os.path.exists(data_dict[f"{sides_rev[side_id]}.audio"][i]):
                    os.remove(data_dict[f"{sides_rev[side_id]}.audio"][i])
broken = set(broken_files)
valid_data_id = [i for i in range(len(audio_dataset)) if i not in broken]
audio_dataset_valid = audio_dataset.select(valid_data_id)


# trim the audio according to the duration annotation
def clip_audio(batch):
    """Slice each side's waveform to its [duration_start, duration_end) sample range (batched)."""
    for side_id in sides_rev.keys():
        start = batch[f"{sides_rev[side_id]}.duration_start"]
        end = batch[f"{sides_rev[side_id]}.duration_end"]
        audio = batch[f"{sides_rev[side_id]}.audio"]
        batch[f"{sides_rev[side_id]}.audio"] = [
            {"array": a["array"][s:e], "sampling_rate": a["sampling_rate"]}
            for a, s, e in zip(audio, start, end)
        ]
    return batch


# apply the trimming in batches of 128 examples
audio_dataset_valid = audio_dataset_valid.map(
    function=clip_audio,
    batched=True,
    batch_size=128,
    num_proc=1,
    desc="clipping audio based on the duration"
)

dataset_to_push = DatasetDict({"train": audio_dataset_valid})
repo_name = f"{hf_org}/{hf_dataset}"
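# Hub uploads can fail transiently (network errors, rate limits); retry until it succeeds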
while True:
    try:
        dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
        break
    except Exception as e:
        print(f"FAILED: push_to_hub on {repo_name} failed ({e}). wait 60 sec and retry...")
        time.sleep(60)