import json
import os
import tarfile
import zipfile
import gzip
import subprocess
from os.path import join as p_join
from tqdm import tqdm
from multiprocessing import Pool
from typing import Optional
import pandas as pd
# dataset config
url_metadata_dict = {
"enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
"enA-jpn": "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
}
direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
# processor config
n_pool = int(os.getenv("N_POOL", 8))
wget_max_retry = os.getenv("MAX_RETRY", "1")
wget_timeout = os.getenv("TIMEOUT", "20")
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 500000))
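# Example invocation (the script name is illustrative; adjust to the actual file name):
#   DIRECTION=enA-jaA N_POOL=4 LINE_NO_START=0 LINE_NO_END=1000 python download.py
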
def wget(url: str, cache_dir: str, filename: Optional[str] = None):
    """Download `url` into `cache_dir` and unpack tar/gzip/zip archives in place.

    Returns True on success, False if the download produced no file.
    """
    os.makedirs(cache_dir, exist_ok=True)
    filename = os.path.basename(url) if not filename else filename
    output_file = p_join(cache_dir, filename)
    subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
    if not os.path.exists(output_file):
        return False
    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
        if output_file.endswith('.tar'):
            tar = tarfile.open(output_file)
        else:
            tar = tarfile.open(output_file, "r:gz")
        tar.extractall(cache_dir)
        tar.close()
        os.remove(output_file)
    elif output_file.endswith('.gz'):
        with gzip.open(output_file, 'rb') as f:
            with open(output_file.replace('.gz', ''), 'wb') as f_write:
                f_write.write(f.read())
        os.remove(output_file)
    elif output_file.endswith('.zip'):
        with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall(cache_dir)
        os.remove(output_file)
    return True
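
# Example (illustrative): wget(url_metadata_dict["enA-jaA"], cache_dir="download/meta")
# fetches the .tsv.gz metadata and leaves the decompressed .tsv under download/meta.
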
def get_metadata():
    """Download the alignment metadata TSV for the configured direction and return it as a DataFrame."""
    url_metadata = url_metadata_dict[direction]
    meta_data_filename = os.path.basename(url_metadata).replace(".gz", "")
    cache_dir_metadata = p_join("download", "meta")
    if not os.path.exists(p_join(cache_dir_metadata, meta_data_filename)):
        assert wget(url_metadata, cache_dir=cache_dir_metadata)
    # a regex separator requires the python engine; passing it explicitly avoids the ParserWarning
    df = pd.read_csv(
        p_join(cache_dir_metadata, meta_data_filename), sep=r'[\t\s]', header=None, engine="python"
    )[[0, 2, 6, 9, 10, 11, 12]]
    df.columns = ["id", "url", "text_lid_score", "laser_score", "direction", "side", "line_no"]
    assert len(df["direction"].unique()) == 1
    df.pop("direction")
    return df.sort_values(by=["line_no", "side"])
def get_audio(dataframe: pd.DataFrame):
    """Download both audio files of one aligned pair and dump its metadata as JSON."""
    cache_dir_audio = p_join("download", "audio", direction)
    cache_dir_feature = p_join("download", "feature", direction)
    os.makedirs(cache_dir_feature, exist_ok=True)
    features = {"line_no": int(dataframe.pop('line_no').values[0])}
    for side, df in dataframe.groupby("side"):
        df.pop("side")
        # unwrap numpy scalars (e.g. int64), which json.dump cannot serialize
        features.update({f"{side}.{k}": v.item() if hasattr(v, "item") else v for k, v in df.iloc[0].to_dict().items()})
        features[f"{side}.path"] = p_join(cache_dir_audio, os.path.basename(features[f"{side}.url"]))
        if not os.path.exists(features[f"{side}.path"]):
            # pass only the basename; wget() joins it with cache_dir itself
            if not wget(features[f"{side}.url"], cache_dir=cache_dir_audio, filename=os.path.basename(features[f"{side}.path"])):
                return False
    # one JSON file per aligned pair, named by its line_no
    with open(p_join(cache_dir_feature, f"{features['line_no']}.json"), "w") as f:
        json.dump(features, f)
    return True
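
# Each dumped JSON holds one aligned pair, keyed per side, e.g. for DIRECTION=enA-jaA
# (values illustrative):
#   {"line_no": 0,
#    "enA.id": ..., "enA.url": ..., "enA.text_lid_score": ..., "enA.laser_score": ..., "enA.path": ...,
#    "jaA.id": ..., "jaA.url": ..., "jaA.text_lid_score": ..., "jaA.laser_score": ..., "jaA.path": ...}
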
def process_dataset():
    df_metadata = get_metadata()
    print(f"metadata: {len(df_metadata)}")
    # keep only the pairs whose line_no falls in the configured range
    inputs = [g for line_no, g in df_metadata.groupby("line_no") if line_no_start <= line_no < line_no_end]
    print(f"filtered unique lines: {len(inputs)}")
    inputs = [g for g in inputs if len(g) == 2]
    print(f"after dropping groups without exactly two rows: {len(inputs)}")
    inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
    print(f"after dropping groups without both sides: {len(inputs)}")
    if n_pool == 1:
        for g in tqdm(inputs, total=len(inputs)):
            if not get_audio(g):
                print(f"failed:\n{g['url']}")
    else:
        with Pool(n_pool) as pool:
            # get_audio takes one DataFrame per call, so use imap rather than starmap
            # (starmap would try to unpack each DataFrame into positional arguments);
            # wrapping the iterator in tqdm keeps the progress bar accurate
            results = list(tqdm(pool.imap(get_audio, inputs), total=len(inputs)))
            print(f"failed downloads: {sum(not r for r in results)}")
if __name__ == '__main__':
    process_dataset()