"""
https://stackoverflow.com/questions/31353244/how-to-config-wget-to-retry-more-than-20
"""
import os
import gzip
import subprocess
import tarfile
import zipfile
from multiprocessing import Pool
from os.path import join as p_join
from typing import Optional

import pandas as pd
from tqdm import tqdm


# metadata for the speech-to-speech (enA-jaA) and speech-to-text (enA-jpn) alignments
url_metadata_s2s = "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz"
url_metadata_s2t = "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
cache_dir_root = "download"
cache_dir_audio = p_join(cache_dir_root, "audio")
cache_dir_metadata = p_join(cache_dir_root, "meta")
# tunable via environment variables
n_pool = int(os.getenv("N_POOL", "8"))
wget_max_retry = os.getenv("MAX_RETRY", "1")
wget_timeout = os.getenv("TIMEOUT", "20")
data = os.getenv("DATA", "s2s")
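
# Example invocations (assuming the script is saved as download.py, a name not
# fixed by the source):
#   DATA=s2s N_POOL=16 python download.py
#   DATA=s2t MAX_RETRY=3 TIMEOUT=60 python download.py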


def wget(url: str, cache_dir: str, filename: Optional[str] = None) -> bool:
    """Download a file with wget and unpack tar/gzip/zip archives in place."""
    os.makedirs(cache_dir, exist_ok=True)
    filename = os.path.basename(url) if not filename else filename
    output_file = p_join(cache_dir, filename)
    result = subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
    if result.returncode != 0 or not os.path.exists(output_file):
        # wget -O may leave an empty file behind on failure; clean it up
        if os.path.exists(output_file):
            os.remove(output_file)
        return False
    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
        mode = "r" if output_file.endswith('.tar') else "r:gz"
        with tarfile.open(output_file, mode) as tar:
            tar.extractall(cache_dir)
        os.remove(output_file)
    elif output_file.endswith('.gz'):
        with gzip.open(output_file, 'rb') as f:
            with open(output_file.removesuffix('.gz'), 'wb') as f_write:
                f_write.write(f.read())
        os.remove(output_file)
    elif output_file.endswith('.zip'):
        with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall(cache_dir)
        os.remove(output_file)
    return True


def get_metadata(url: str) -> pd.DataFrame:
    filename = os.path.basename(url).removesuffix(".gz")
    # wget() decompresses the .gz archive, so check for the extracted file in the cache dir
    if not os.path.exists(p_join(cache_dir_metadata, filename)):
        assert wget(url, cache_dir=cache_dir_metadata), f"failed to download {url}"
    # mixed tab/space delimiters in the metadata require a regex separator (python engine)
    df = pd.read_csv(p_join(cache_dir_metadata, filename), sep=r'[\t\s]', header=None, engine="python")[[0, 2, 6, 9, 10, 11, 12]]
    df.columns = ["id", "url", "text_lid_score", "laser_score", "direction", "side", "line_no"]
    return df


def get_audio(url: str, filename: str):
    if not os.path.exists(p_join(cache_dir_audio, filename)):
        return wget(url, filename=filename, cache_dir=cache_dir_audio)
    return True


def process_dataset(url_metadata):
    df_metadata = get_metadata(url_metadata)
    print(f"loaded metadata: {url_metadata} ({len(df_metadata)} rows)")
    # audio filename pattern: <id>.<direction>.<side>.<url basename>
    inputs = [(
        r['url'], f"{r['id']}.{r['direction']}.{r['side']}.{os.path.basename(r['url'])}"
    ) for _, r in df_metadata.iterrows()]
    # skip files that are already in the local audio cache
    inputs = [x for x in inputs if not os.path.exists(p_join(cache_dir_audio, x[1]))]
    print(f"{len(inputs)} urls to download")
    if n_pool == 1:
        for url, filename in tqdm(inputs):
            if not get_audio(url, filename):
                print(f"failed:\n{url}")
    else:
        # note: tqdm tracks task submission here, not download completion
        with Pool(n_pool) as pool:
            flags = pool.starmap(get_audio, tqdm(inputs, total=len(inputs)))
        for (url, _), flag in zip(inputs, flags):
            if not flag:
                print(f"failed:\n{url}")


if __name__ == '__main__':
    if data == "s2s":
        process_dataset(url_metadata_s2s)
    elif data == "s2t":
        process_dataset(url_metadata_s2t)
    else:
        raise ValueError(f"unknown data type {data}")
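
# Quick post-run sanity checks (hypothetical shell commands, not part of the script):
#   ls download/audio | wc -l   # count downloaded audio files
#   ls download/meta            # extracted metadata TSVs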