import json
import os
import tarfile
import zipfile
import gzip
import subprocess
import time
from os.path import join as p_join
from tqdm import tqdm
from multiprocessing import Pool
from typing import Optional, Dict
from glob import glob
import pandas as pd
import soundfile as sf
from datasets import Dataset, Audio, DatasetDict
audio_loader = Audio()
# dataset config
url_metadata_dict = {
    "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
    "enA-jpn": "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
}
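# Each metadata TSV lists one aligned audio segment per row; get_metadata() below keeps only the
# id, url, segment start/end, LASER score, direction, side, and line-number columns.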
direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_feature, exist_ok=True)
for s in sides:
    os.makedirs(p_join(cache_dir_audio, s), exist_ok=True)
# processor config
n_pool = int(os.getenv("N_POOL", 8))
wget_max_retry = os.getenv("MAX_RETRY", "2")
wget_timeout = os.getenv("TIMEOUT", "30")
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 10000))
dataset_id = os.getenv("DATASET_ID", 0)
hf_org = os.getenv("HF_ORG", "asahi417")
hf_dataset = f"seamless-align-{direction}"
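# Example invocation (a sketch; the script filename is a placeholder, and every variable has a default):
#   DIRECTION=enA-jaA N_POOL=8 LINE_NO_START=0 LINE_NO_END=10000 DATASET_ID=0 \
#     HF_ORG=asahi417 python download_seamless_align.py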


def wget(url: str, output_file: Optional[str] = None):
    """Download a file with wget and unpack it in place if it is a tar/gz/zip archive."""
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
    if not os.path.exists(output_file):
        return False
    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
        if output_file.endswith('.tar'):
            tar = tarfile.open(output_file)
        else:
            tar = tarfile.open(output_file, "r:gz")
        tar.extractall(os.path.dirname(output_file))
        tar.close()
        os.remove(output_file)
    elif output_file.endswith('.gz'):
        with gzip.open(output_file, 'rb') as f:
            with open(output_file.replace('.gz', ''), 'wb') as f_write:
                f_write.write(f.read())
        os.remove(output_file)
    elif output_file.endswith('.zip'):
        with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall()
        os.remove(output_file)
    return True


def get_metadata():
    """Download the alignment metadata TSV for the current direction and return it as a DataFrame."""
    url_metadata = url_metadata_dict[direction]
    meta_data_filename = os.path.basename(url_metadata)
    meta_data_path = p_join("download", "meta", meta_data_filename)
    if not os.path.exists(meta_data_path.replace(".gz", "")):
        assert wget(url_metadata, output_file=meta_data_path)
    df = pd.read_csv(meta_data_path.replace(".gz", ""), sep=r'[\t\s]', header=None)
    df = df[[0, 2, 3, 4, 9, 10, 11, 12]]
    df.columns = ["id", "url", "duration_start", "duration_end", "laser_score", "direction", "side", "line_no"]
    if direction == "enA-jpn":
        df = df[df["side"] == "enA"]
    assert len(df["direction"].unique()) == 1
    df.pop("direction")
    return df.sort_values(by=["line_no", "side"])


def to_json_serializable(val):
    """Cast numpy scalar types to plain Python types so they can be JSON-serialized."""
    if "float" in str(type(val)):
        return float(val)
    if "int" in str(type(val)):
        return int(val)
    return str(val)


def get_audio(dataframe: pd.DataFrame):
    """Download the audio for one aligned line, trim it to the segment boundaries, and cache its metadata as JSON."""
    features = {"line_no": int(dataframe.pop('line_no').values[0])}
    for side, df in dataframe.groupby("side"):
        df.pop("side")
        features.update({f"{side}.{k}": to_json_serializable(v) for k, v in df.iloc[0].to_dict().items()})
        identifier = os.path.basename(features[f"{side}.url"]).split(".")[-1]
        features[f"{side}.path"] = str(p_join(cache_dir_audio, side, f"{features['line_no']}.{identifier}"))
        start, end = features[f"{side}.duration_start"], features[f"{side}.duration_end"]
        if not os.path.exists(features[f"{side}.path"]):
            flag = wget(features[f"{side}.url"], output_file=features[f"{side}.path"])
            if not flag:
                return False
            else:
                try:
                    wav = audio_loader.decode_example({"path": features[f"{side}.path"], "bytes": None})
                    if start < end < len(wav["array"]):
                        sf.write(features[f"{side}.path"], wav["array"][start:end], wav["sampling_rate"])
                    else:
                        os.remove(features[f"{side}.path"])
                        return False
                except Exception as e:
                    print(e)
                    os.remove(features[f"{side}.path"])
                    return False
    with open(p_join(cache_dir_feature, f'{features["line_no"]}.json'), "w") as f:
        json.dump(features, f)
    return True


def process_dataset():
    """Fetch and trim the audio for the selected line range, then push the cached records to the Hugging Face Hub."""
    df_metadata = get_metadata()
    print(f"metadata: {len(df_metadata)}, {line_no_start} --> {line_no_end}")
    inputs = [
        g for line_no, g in df_metadata.groupby("line_no")
        if line_no_start <= line_no < line_no_end and not os.path.exists(
            p_join(cache_dir_feature, f'{int(line_no)}.json')
        )
    ]
    print(f"filtered unique lines: {len(inputs)}")
    if direction == "enA-jaA":
        inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
        print(f"after removing lines without both sides: {len(inputs)}")
    if n_pool == 1:
        for g in tqdm(inputs, total=len(inputs)):
            flag = get_audio(g)
            if not flag:
                print(f"failed:\n{g['url']}")
    else:
        with Pool(n_pool) as pool:
            pool.map(get_audio, tqdm(inputs, total=len(inputs)))

    def loader(feature: str) -> Dict:
        with open(feature) as f_reader:
            return json.load(f_reader)

    features = [loader(i) for i in glob(p_join(cache_dir_feature, '*.json'))]
    print(f"push {len(features)} records to hub")
    data_dict = {}
    for side in sides:
        data_dict.update({f"{side}.audio": [i.pop(f"{side}.path") for i in features]})
    data_dict.update({k: [i[k] for i in features] for k in features[0].keys()})
    audio_dataset = Dataset.from_dict(data_dict)
    for side in sides:
        audio_dataset = audio_dataset.cast_column(f"{side}.audio", Audio())
    dataset_to_push = DatasetDict({"train": audio_dataset})
    repo_name = f"{hf_org}/{hf_dataset}"
    while True:
        try:
            dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
            break
        except Exception:
            print(f"FAILED: push_to_hub on {repo_name} failed. waiting 60 seconds before retrying...")
            time.sleep(60)


if __name__ == '__main__':
    process_dataset()
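# Loading a pushed subset back (a minimal sketch using the repo/config names from push_to_hub above):
#   from datasets import load_dataset
#   dataset = load_dataset(f"{hf_org}/{hf_dataset}", f"subset_{dataset_id}", split="train")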