"""Build a speech-to-speech translation dataset from seamless-align downloads
and push it to the Hugging Face Hub as kotoba-tech/seamless-align-<direction>."""
import json
import os
import time
from glob import glob
from os.path import join as p_join
from typing import Dict

from datasets import Audio, Dataset, DatasetDict
from soundfile import LibsndfileError
from tqdm import tqdm

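# Configuration comes from environment variables so the job can be sharded:
# DIRECTION selects the language pair, LINE_NO_START/LINE_NO_END select the
# row range to process, and DATASET_ID names the subset pushed to the Hub.
# Example invocation (script name illustrative):
#   DIRECTION=enA-jaA LINE_NO_START=0 LINE_NO_END=10000 DATASET_ID=0 python push_dataset.py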
direction = os.getenv("DIRECTION", "enA-jaA")
sides = {i: n for n, i in enumerate(sorted(direction.split("-")), 1)}
sides_rev = {v: k for k, v in sides.items()}
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_audio, exist_ok=True)
os.makedirs(cache_dir_feature, exist_ok=True)
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 10000))
dataset_id = int(os.getenv("DATASET_ID", 0))
hf_org = "kotoba-tech"
hf_dataset = f"seamless-align-{direction}"


def loader(feature: str) -> Dict:
    """Read a single cached feature JSON file."""
    with open(feature) as f:
        return json.load(f)
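

# Index cached feature files by the integer row number in the file name
# (e.g. "123.json" -> 123), then load the rows assigned to this shard.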
files = {
    int(os.path.basename(i).replace(".json", "")): i
    for i in glob(p_join(cache_dir_feature, "*.json"))
}
file_ids = [i for i in range(line_no_start, line_no_end) if i in files]
features = [loader(files[i]) for i in file_ids]
print(f"features: {len(features)}")
# Drop rows whose audio files are missing. Filter file_ids in lockstep so that
# dataset row i still maps back to files[file_ids[i]] when deleting below.
kept = [
    (n, f) for n, f in zip(file_ids, features)
    if os.path.exists(f[f"{sides_rev[1]}.path"]) and os.path.exists(f[f"{sides_rev[2]}.path"])
]
file_ids = [n for n, _ in kept]
features = [f for _, f in kept]
print(f"features (filtered): {len(features)}")
data_dict = {
    f"{sides_rev[1]}.audio": [i.pop(f"{sides_rev[1]}.path") for i in features],
    f"{sides_rev[2]}.audio": [i.pop(f"{sides_rev[2]}.path") for i in features]
}
keys = features[0].keys()
data_dict.update({k: [i[k] for i in features] for k in keys})
audio_dataset = Dataset.from_dict(data_dict)
audio_dataset = audio_dataset.cast_column(f"{sides_rev[1]}.audio", Audio())
audio_dataset = audio_dataset.cast_column(f"{sides_rev[2]}.audio", Audio())
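
# Decode each row once to catch unreadable audio (LibsndfileError) and clip
# windows that fall outside the decoded waveform; record broken row indices.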
broken_files = []
for i in tqdm(range(len(audio_dataset))):
    try:
        a = audio_dataset[i]
        flag = True
        for side_id in sides_rev.keys():
            start = a[f"{sides_rev[side_id]}.duration_start"]
            end = a[f"{sides_rev[side_id]}.duration_end"]
            array = a[f"{sides_rev[side_id]}.audio"]["array"]
            # A row is valid only when the clip window is a proper slice of
            # the waveform on BOTH sides.
            flag = flag and (0 < start < end < len(array))
        if not flag:
            broken_files.append(i)
    except LibsndfileError:
        broken_files.append(i)
print(f"features (removed broken audio): {len(audio_dataset) - len(broken_files)}")
if len(broken_files) > 0:
    print(f"found {len(broken_files)} broken files")
    flag = input("delete the broken files? (y/n): ")
    if flag == "y":
        for i in broken_files:
            # Remove the cached feature JSON and both audio files for row i.
            if os.path.exists(files[file_ids[i]]):
                os.remove(files[file_ids[i]])
            for side_id in sides_rev.keys():
                if os.path.exists(data_dict[f"{sides_rev[side_id]}.audio"][i]):
                    os.remove(data_dict[f"{sides_rev[side_id]}.audio"][i])
broken_file_set = set(broken_files)
valid_data_id = [i for i in range(len(audio_dataset)) if i not in broken_file_set]
audio_dataset_valid = audio_dataset.select(valid_data_id)
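

# duration_start / duration_end are sample offsets into the decoded waveform,
# so clipping reduces to a plain array slice applied batch by batch.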
def clip_audio(batch):
    """Trim each decoded waveform to its [duration_start, duration_end) samples."""
    for side_id in sides_rev.keys():
        start = batch[f"{sides_rev[side_id]}.duration_start"]
        end = batch[f"{sides_rev[side_id]}.duration_end"]
        audio = batch[f"{sides_rev[side_id]}.audio"]
        batch[f"{sides_rev[side_id]}.audio"] = [
            {"array": a["array"][s:e], "sampling_rate": a["sampling_rate"]}
            for a, s, e in zip(audio, start, end)
        ]
    return batch


audio_dataset_valid = audio_dataset_valid.map(
    function=clip_audio,
    batched=True,
    batch_size=128,
    num_proc=1,
    desc="clipping audio based on the duration"
)
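
# Upload as a named subset of the repo. Hub pushes can fail transiently
# (network errors, rate limits), so retry indefinitely.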
dataset_to_push = DatasetDict({"train": audio_dataset_valid})
repo_name = f"{hf_org}/{hf_dataset}"
while True:
    try:
        dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
        break
    except Exception:
        print(f"FAILED: push_to_hub to {repo_name} failed; waiting 60 seconds before retrying...")
        time.sleep(60)