# experiment-process-seamless-align / push_s2t_translation.py
# (uploaded by asahi417, commit febb4b1)
import json
import os
import time
from os.path import join as p_join
from tqdm import tqdm
from typing import Dict
from glob import glob
from soundfile import LibsndfileError
from datasets import Dataset, Audio, DatasetDict
# dataset config
# Language pair comes from the environment, e.g. enA (English audio) paired
# with jpn (Japanese text) — TODO confirm code meanings against the pipeline.
direction_speech = os.getenv("DIRECTION_SPEECH", "enA")
direction_text = os.getenv("DIRECTION_TEXT", "jpn")
direction = f"{direction_speech}-{direction_text}"
# line_no -> translation text mapping produced by an earlier pipeline step.
with open(f"text.{direction}.json") as f:
    line2text = json.load(f)
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_audio, exist_ok=True)
os.makedirs(cache_dir_feature, exist_ok=True)
# Slice of line numbers handled by this job (supports sharded processing).
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 10000))
# Default is the string "0" so the value is a str whether or not the env var
# is set (the original int fallback made the type depend on the environment).
dataset_id = os.getenv("DATASET_ID", "0")
hf_org = "kotoba-tech"
hf_dataset = f"seamless-align-{direction}"
def loader(feature: str) -> Dict:
    """Read the JSON feature file at *feature* and return its parsed contents."""
    with open(feature) as fp:
        payload = fp.read()
    return json.loads(payload)
# create a dataset instance
# Map each cached feature JSON to the line number encoded in its basename.
files = {}
for feature_path in glob(p_join(cache_dir_feature, "*.json")):
    key = int(os.path.basename(feature_path).replace(".json", ""))
    files[key] = feature_path
def delete_audio(target_audio_file):
    """Remove a broken audio file and, best-effort, its paired feature JSON.

    The feature file is found via the module-level ``files`` mapping, keyed by
    the line number encoded in the audio basename (``<line_no>.<ext>``).
    Expected failures are logged and ignored so the caller's loop continues.
    """
    if os.path.exists(target_audio_file):
        os.remove(target_audio_file)
    line_no = os.path.basename(target_audio_file).split(".")[0]
    try:
        feature_file = files[int(line_no)]
        if os.path.exists(feature_file):
            os.remove(feature_file)
    # Narrowed from a bare ``except Exception``: swallow only the failures this
    # cleanup expects — unknown line number (KeyError), non-numeric basename
    # (ValueError), or a filesystem race on remove (OSError).
    except (KeyError, ValueError, OSError) as e:
        print(e)
# remove broken audio files
print("filtering....")
features = []
audio_loader = Audio()
# tqdm can take the range directly (it has a len); wrapping it in list() only
# materialized a throwaway copy.
for line_no in tqdm(range(line_no_start, line_no_end)):
    if line_no not in files:
        continue
    # Use a distinct name for the record dict — the original reused the loop
    # variable ``i``, which obscured which value was which.
    record = loader(files[line_no])
    record[f"{direction_text}.text"] = line2text[str(record["line_no"])]
    audio_file = record.pop(f"{direction_speech}.path")
    start, end = record[f"{direction_speech}.duration_start"], record[f"{direction_speech}.duration_end"]
    if os.path.exists(audio_file):
        try:
            wav = audio_loader.decode_example({"path": audio_file, "bytes": None})
            # duration_start/end appear to be indices into the decoded sample
            # array — TODO confirm units against the feature-extraction step.
            if start < end < len(wav["array"]):
                wav["array"] = wav["array"][start:end]
                record[f"{direction_speech}.audio"] = wav
                features.append(record)
            else:
                # implausible span: treat the clip as broken and clean it up
                delete_audio(audio_file)
        except Exception as e:
            # decode failures (e.g. LibsndfileError) -> broken audio, clean up
            print(e)
            delete_audio(audio_file)
print(f"features (filtered): {len(features)}")
# Assemble the HF dataset from the filtered records; the first record's keys
# define the columns (all records are assumed to share the same schema).
keys = features[0].keys()
data_dict = {k: [feat[k] for feat in features] for k in keys}
audio_dataset = Dataset.from_dict(data_dict)
audio_dataset = audio_dataset.cast_column(f"{direction_speech}.audio", Audio())
dataset_to_push = DatasetDict({"train": audio_dataset})
repo_name = f"{hf_org}/{hf_dataset}"
# Retry indefinitely: hub pushes fail transiently (rate limits, network).
while True:
    try:
        dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
        break
    except Exception as e:
        # Log the actual error before backing off — it was silently discarded
        # before, making failures impossible to diagnose.
        print(e)
        print(f"FAILED: push_to_hub on {repo_name} failed. wait 60 sec and retry soon...")
        time.sleep(60)
# Record which line numbers made it into this pushed subset.
os.makedirs("log", exist_ok=True)
with open(f"log/pushed.line_no.{dataset_id}.json", "w") as f:
    json.dump(data_dict["line_no"], f)