import json
import os
import time
from glob import glob
from os.path import join as p_join
from typing import Dict

from tqdm import tqdm
from soundfile import LibsndfileError
from datasets import Dataset, Audio, DatasetDict

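# Configuration comes from environment variables: the speech/text language codes, the range
# of line numbers to process, and the subset index used when pushing to the Hub.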
direction_speech = os.getenv("DIRECTION_SPEECH", "enA")
direction_text = os.getenv("DIRECTION_TEXT", "jpn")
direction = f"{direction_speech}-{direction_text}"
with open(f"text.{direction}.json") as f:
    line2text = json.load(f)
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_audio, exist_ok=True)
os.makedirs(cache_dir_feature, exist_ok=True)
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 10000))
dataset_id = int(os.getenv("DATASET_ID", 0))
hf_org = "kotoba-tech"
hf_dataset = f"seamless-align-{direction}"


def loader(feature: str) -> Dict:
    with open(feature) as f:
        return json.load(f)


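# Map each line number to its cached feature file (feature files are named "<line_no>.json").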
files = {
    int(os.path.basename(i).replace(".json", "")): i for i in glob(p_join(cache_dir_feature, "*.json"))
}


def delete_audio(target_audio_file):
    # Remove a broken audio file and, if present, the feature file that points to it.
    if os.path.exists(target_audio_file):
        os.remove(target_audio_file)
    line_no = os.path.basename(target_audio_file).split(".")[0]
    try:
        feature_file = files[int(line_no)]
        if os.path.exists(feature_file):
            os.remove(feature_file)
    except Exception as e:
        print(e)


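# Validate each cached pair: attach the reference text, decode the audio, and keep only samples
# whose clip range fits inside the decoded waveform; broken samples are removed from the cache.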
features = []
audio_loader = Audio()
for line_no in tqdm(list(range(line_no_start, line_no_end))):
    if line_no not in files:
        continue
    feature = loader(files[line_no])
i[f"{direction_text}.text"] = line2text[str(i)] |
|
audio_file = i[f"{direction_speech}.path"] |
|
start, end = i[f"{direction_speech}.duration_start"], i[f"{direction_speech}.duration_end"] |
|
    if os.path.exists(audio_file):
        try:
            wav = audio_loader.decode_example({"path": audio_file, "bytes": None})
            # keep the sample only if its clip range is consistent with the decoded waveform
            if start < end < len(wav["array"]):
                features.append(feature)
            else:
                delete_audio(audio_file)
        except LibsndfileError as e:
            # the audio file could not be decoded: report it and drop it from the cache
            print(e)
            delete_audio(audio_file)

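# Build a Dataset from the surviving features; the path column is cast to Audio so the files
# are decoded when the column is accessed.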
print(f"features (filtered): {len(features)}") |
|
data_dict = {f"{direction_speech}.audio": [i.pop(f"{direction_speech}.path") for i in features]} |
|
keys = features[0].keys() |
|
data_dict.update({k: [i[k] for i in features] for k in keys}) |
|
audio_dataset = Dataset.from_dict(data_dict) |
|
audio_dataset = audio_dataset.cast_column(f"{direction_speech}.audio", Audio()) |
|
|
|
|
|
|
|
def clip_audio(batch):
    # Slice each decoded waveform to its [duration_start, duration_end) sample range.
    start = batch[f"{direction_speech}.duration_start"]
    end = batch[f"{direction_speech}.duration_end"]
    audio = batch[f"{direction_speech}.audio"]
    batch[f"{direction_speech}.audio"] = [
        {"array": a["array"][s:e], "sampling_rate": a["sampling_rate"]}
        for a, s, e in zip(audio, start, end)
    ]
    return batch


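# Apply the clipping to the whole dataset in batches.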
audio_dataset_valid = audio_dataset.map(
    function=clip_audio,
    batched=True,
    batch_size=128,
    num_proc=1,
    desc="clipping audio based on the duration"
)

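# Push the subset to the Hub, retrying every 60 seconds on transient failures, and log the
# line numbers that were included.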
dataset_to_push = DatasetDict({"train": audio_dataset_valid})
repo_name = f"{hf_org}/{hf_dataset}"
while True:
    try:
        dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
        break
    except Exception:
        print(f"FAILED: push_to_hub on {repo_name} failed. Waiting 60 seconds before retrying...")
        time.sleep(60)

os.makedirs("log", exist_ok=True)
with open(f"log/pushed.line_no.{dataset_id}.json", "w") as f:
    json.dump(data_dict["line_no"], f)