asahi417 committed on
Commit
eac7224
1 Parent(s): 3b95e3a
Files changed (3) hide show
  1. download_audio.py +10 -6
  2. fetch_dataset_s2t.py +232 -0
  3. main.sh +70 -48
download_audio.py CHANGED
@@ -20,9 +20,13 @@ audio_loader = Audio()
20
  # dataset config
21
  url_metadata_dict = {
22
  "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
23
- "enA-jpn": "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
 
24
  }
25
  direction = os.getenv("DIRECTION", "enA-jaA")
 
 
 
26
  sides = set(direction.split("-"))
27
  cache_dir_audio = p_join("download", "audio", direction)
28
  cache_dir_feature = p_join("download", "feature", direction)
@@ -146,6 +150,11 @@ def get_audio(dataframe: pd.DataFrame):
146
  return features["line_no"]
147
 
148
 
 
 
 
 
 
149
  if __name__ == '__main__':
150
  if not skip_download:
151
  df_metadata = get_metadata()
@@ -170,11 +179,6 @@ if __name__ == '__main__':
170
  if line_no:
171
  print(line_no)
172
 
173
- def loader(feature: str) -> Dict:
174
- with open(feature) as f_reader:
175
- return json.load(f_reader)
176
-
177
-
178
  print("UPLOADING TO HF!!!")
179
  features = [p_join(cache_dir_feature, f'{i}.json') for i in range(line_no_start, line_no_end)]
180
  print(f"- raw feature: {len(features)}")
 
20
  # dataset config
21
  url_metadata_dict = {
22
  "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
23
+ "enA-zhA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-zhA.tsv.gz",
24
+ "enA-viA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-viA.tsv.gz",
25
  }
26
  direction = os.getenv("DIRECTION", "enA-jaA")
27
+ if direction not in url_metadata_dict:
28
+ a, b = direction.split("-")
29
+ url_metadata_dict[direction] = f"https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.{a}-{b}.tsv.gz"
30
  sides = set(direction.split("-"))
31
  cache_dir_audio = p_join("download", "audio", direction)
32
  cache_dir_feature = p_join("download", "feature", direction)
 
150
  return features["line_no"]
151
 
152
 
153
+ def loader(feature: str) -> Dict:
154
+ with open(feature) as f_reader:
155
+ return json.load(f_reader)
156
+
157
+
158
  if __name__ == '__main__':
159
  if not skip_download:
160
  df_metadata = get_metadata()
 
179
  if line_no:
180
  print(line_no)
181
 
 
 
 
 
 
182
  print("UPLOADING TO HF!!!")
183
  features = [p_join(cache_dir_feature, f'{i}.json') for i in range(line_no_start, line_no_end)]
184
  print(f"- raw feature: {len(features)}")
fetch_dataset_s2t.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import tarfile
4
+ import zipfile
5
+ import gzip
6
+ import subprocess
7
+ from os.path import join as p_join
8
+ from math import ceil, floor
9
+ from tqdm import tqdm
10
+ from multiprocessing import Pool
11
+ from typing import Optional, Dict
12
+ from glob import glob
13
+ # import librosa
14
+
15
+ import pandas as pd
16
+ import soundfile as sf
17
+ from datasets import Dataset, Audio, DatasetDict
18
+
19
audio_loader = Audio()
# dataset config
# metadata TSVs from the seamless-align public release, keyed by direction string
url_metadata_dict = {
    "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
    "enA-jpn": "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
}
# NOTE(review): direction_speech/direction_text are not referenced anywhere in
# this file's visible code — confirm whether they are still needed.
direction_speech = os.getenv("DIRECTION_SPEECH", "enA")
direction_text = os.getenv("DIRECTION_TEXT", "jpn")
direction = os.getenv("DIRECTION", "enA-jpn")
sides = set(direction.split("-"))  # e.g. {"enA", "jpn"}
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_feature, exist_ok=True)
for s in sides:
    os.makedirs(p_join(cache_dir_audio, s), exist_ok=True)
# processor config
n_pool = int(os.getenv("N_POOL", 1))  # worker processes for the download pool
wget_max_retry = os.getenv("MAX_RETRY", "2")  # passed to wget --tries
wget_timeout = os.getenv("TIMEOUT", "20")  # passed to wget --timeout (seconds)
line_no_start = int(os.getenv("LINE_NO_START", 0))  # shard lower bound (inclusive)
line_no_end = int(os.getenv("LINE_NO_END", 10000))  # shard upper bound (exclusive)
dataset_id = os.getenv("DATASET_ID", 0)  # suffix for the HF config name "subset_<id>"
hf_org = os.getenv("HF_ORG", "asahi417")
hf_dataset = f"seamless-align-{direction}"
skip_download = bool(int(os.getenv("SKIP_DOWNLOAD", 0)))
sampling_rate = 16000  # seamless-align aligns audio in 16kHz
45
+
46
+
47
def wget(url: str, output_file: Optional[str] = None) -> bool:
    """Download ``url`` to ``output_file`` and unpack recognised archives.

    Archives are detected by extension and extracted next to the download,
    after which the archive itself is removed: ``.tar``/``.tar.gz``/``.tgz``,
    plain ``.gz`` and ``.zip``.

    Parameters
    ----------
    url: source URL handed to the external ``wget`` binary.
    output_file: destination path; defaults to the URL basename in the
        current directory (the original crashed on ``None`` despite the
        Optional annotation).

    Returns
    -------
    True when the file was downloaded (and unpacked), False when wget
    produced no file (e.g. network failure after retries).
    """
    if output_file is None:
        output_file = os.path.basename(url)
    parent = os.path.dirname(output_file)
    # os.makedirs("") raises FileNotFoundError, so only create non-empty parents
    if parent:
        os.makedirs(parent, exist_ok=True)
    subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
    if not os.path.exists(output_file):
        return False
    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
        # mode "r" auto-detects compression for plain .tar archives
        mode = "r" if output_file.endswith('.tar') else "r:gz"
        with tarfile.open(output_file, mode) as tar:
            tar.extractall(os.path.dirname(output_file))
        os.remove(output_file)
    elif output_file.endswith('.gz'):
        # strip only the trailing ".gz": str.replace would also hit a ".gz"
        # occurring earlier in the path
        with gzip.open(output_file, 'rb') as f:
            with open(output_file[:-len('.gz')], 'wb') as f_write:
                f_write.write(f.read())
        os.remove(output_file)
    elif output_file.endswith('.zip'):
        with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall()
        os.remove(output_file)
    return True
70
+
71
+
72
def get_metadata():
    """Fetch (if not cached) and parse the alignment metadata TSV.

    Returns a DataFrame with columns
    [id, url, duration_start, duration_end, laser_score, side, line_no],
    sorted by (line_no, side).
    """
    meta_url = url_metadata_dict[direction]
    meta_name = os.path.basename(meta_url)
    meta_path = p_join("download", "meta", meta_name)
    decompressed = meta_path.replace(".gz", "")
    if not os.path.exists(decompressed):
        assert wget(meta_url, output_file=meta_path)
    df = pd.read_csv(decompressed, sep=r'[\t\s]', header=None)
    df = df[[0, 2, 3, 4, 9, 10, 11, 12]]
    df.columns = ["id", "url", "duration_start", "duration_end", "laser_score", "direction", "side", "line_no"]
    if direction == "enA-jpn":
        # NOTE(review): presumably only the speech (enA) rows carry audio
        # URLs for this direction — confirm against the upstream TSV schema.
        df = df[df["side"] == "enA"]
    assert len(df["direction"].unique()) == 1
    df.pop("direction")
    return df.sort_values(by=["line_no", "side"])
86
+
87
+
88
def to_json_serializable(val):
    """Coerce *val* into a plain JSON-friendly type.

    Anything whose type repr mentions "float" (covers numpy floats) becomes
    a builtin float, likewise "int" becomes a builtin int; everything else
    is stringified.
    """
    type_repr = str(type(val))
    if "float" in type_repr:
        return float(val)
    return int(val) if "int" in type_repr else str(val)
94
+
95
+
96
def cleanup(features, feature_file):
    """Remove partial output for a failed line and leave a skip marker.

    Deletes the (possibly half-written) feature JSON plus every audio file
    downloaded for this line_no on either side, then writes a
    ``{"dummy": "dummy"}`` placeholder so later runs skip this line.
    """
    if os.path.exists(feature_file):
        os.remove(feature_file)
    line_no = features['line_no']
    for _side in sides:
        for stale_audio in glob(p_join(cache_dir_audio, _side, f"{line_no}.*")):
            os.remove(stale_audio)
    # create a dummy so that we can skip from next run
    with open(feature_file, "w") as f:
        json.dump({"dummy": "dummy"}, f)
105
+
106
+
107
def get_audio(dataframe: pd.DataFrame):
    """Download and trim the audio for one line_no group; persist a feature JSON.

    *dataframe* holds the metadata rows sharing one line_no (one row per side).
    On success writes ``<cache_dir_feature>/<line_no>.json`` and returns the
    line_no; on any failure calls cleanup() (which leaves a dummy marker) and
    returns None.
    """
    # NOTE(review): unused — leftover from the disabled librosa resampling path below.
    resampler = {}
    features = {"line_no": int(dataframe.pop('line_no').values[0])}
    feature_file = p_join(cache_dir_feature, f'{features["line_no"]}.json')
    for side, df in dataframe.groupby("side"):
        df.pop("side")
        # flatten this side's metadata into "<side>.<column>" keys
        features.update({f"{side}.{k}": to_json_serializable(v) for k, v in df.iloc[0].to_dict().items()})
        # audio file extension taken from the source URL's basename
        identifier = os.path.basename(features[f"{side}.url"]).split(".")[-1]
        features[f"{side}.path"] = str(p_join(cache_dir_audio, side, f"{features['line_no']}.{identifier}"))
        # NOTE(review): start/end appear to be sample offsets at 16kHz that get
        # rescaled to the file's native rate below — confirm the metadata units.
        start, end = features[f"{side}.duration_start"], features[f"{side}.duration_end"]
        if not os.path.exists(features[f"{side}.path"]):
            print(f"WGET {features[f'{side}.url']}")
            flag = wget(features[f"{side}.url"], output_file=features[f"{side}.path"])
            if not flag:
                print("\n#### ERROR: wget failure ####\n")
                cleanup(features, feature_file)
                return None
            else:
                # trimming only happens right after a fresh download; an audio
                # file already on disk is assumed to be trimmed already
                try:
                    print(f"LOAD AUDIO FROM {features[f'{side}.path']}")
                    wav, sr = sf.read(features[f"{side}.path"])
                    print(f"wav shape:{wav.shape}")
                    if wav.ndim > 1:
                        # keep the first channel only
                        wav = wav[:, 0]
                    wav = wav[floor(start / sampling_rate * sr):ceil(end / sampling_rate * sr)]
                    print(f"wav shape (after truncate):{wav.shape}")
                    # NOTE(review): this second slice uses an absolute index
                    # against the already-trimmed array (plus one second of
                    # slack); it looks like a no-op-or-bug — confirm intent.
                    wav = wav[:int(end/sampling_rate * sr) + sr]
                    print(f"SAVING: {features[f'{side}.path']}")
                    sf.write(features[f"{side}.path"], wav, sr)
                    # if sr != sampling_rate:
                    #     print(f"RESAMPLING: {wav.shape} length audio")
                    #     wav = librosa.resample(wav, orig_sr=sr, target_sr=sampling_rate)
                    #     sf.write(features[f"{side}.path"], wav[start:end], sampling_rate)

                except Exception as e:
                    print(f"\n#### ERROR ####\n {e}")
                    cleanup(features, feature_file)
                    return None
    print(f"\n### SUCCESS! ###\n:{features['line_no']}")
    with open(feature_file, "w") as f:
        json.dump(features, f)
    return features["line_no"]
149
+
150
+
151
def loader(feature: str) -> Dict:
    """Read one feature JSON file from disk and return its contents."""
    with open(feature) as handle:
        return json.load(handle)
154
+
155
+
156
if __name__ == '__main__':
    if not skip_download:
        df_metadata = get_metadata()
        print(f"metadata: {len(df_metadata)}, {line_no_start} --> {line_no_end}")
        # keep only line_nos inside this shard that have no feature file yet
        inputs = [
            g for line_no, g in df_metadata.groupby("line_no")
            if line_no_start <= line_no < line_no_end and not os.path.exists(
                p_join(cache_dir_feature, f'{int(line_no)}.json')
            )
        ]
        print(f"filtered unique lines: {len(inputs)}")
        if direction == "enA-jaA":
            # speech-to-speech direction: both sides must be present for a pair
            inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
            print(f"removed side != 2: {len(inputs)}")

        if n_pool == 1:
            for g in tqdm(inputs, total=len(inputs)):
                line_no = get_audio(g)
        else:
            with Pool(n_pool) as pool:
                for line_no in pool.imap_unordered(get_audio, inputs):
                    if line_no:
                        print(line_no)

    print("UPLOADING TO HF!!!")
    features = [p_join(cache_dir_feature, f'{i}.json') for i in range(line_no_start, line_no_end)]
    print(f"- raw feature: {len(features)}")
    features = [i for i in features if os.path.exists(i)]
    print(f"- path exists: {len(features)}")
    features = [loader(i) for i in features]
    # drop the dummy markers written by cleanup() for failed lines
    features = [i for i in features if "dummy" not in i]
    print(f"- dummy removed: {len(features)}")
    print(f"push {len(features)} records to hub")
    data_dict = {}
    for side in sides:
        # NOTE(review): for direction enA-jpn, get_metadata keeps only enA
        # rows, so "jpn.path" is never set and this pop would raise KeyError
        # — confirm how the text side is meant to be handled here.
        data_dict.update({f"{side}.audio": [i.pop(f"{side}.path") for i in features]})
    data_dict.update({k: [i[k] for i in features] for k in features[0].keys()})
    audio_dataset = Dataset.from_dict(data_dict)
    for side in sides:
        audio_dataset = audio_dataset.cast_column(f"{side}.audio", Audio())
    DatasetDict({"train": audio_dataset}).push_to_hub(
        f"{hf_org}/{hf_dataset}",
        config_name=f"subset_{dataset_id}"
    )
200
+
201
+
202
+ # DatasetDict({"train": audio_dataset.select(list(range(1000)))}).push_to_hub(
203
+ # f"{hf_org}/{hf_dataset}",
204
+ # config_name=f"subset_{dataset_id}"
205
+ # )
206
+
207
+ # # 2 panel
208
+ # dataset_id = 75
209
+ # DatasetDict({"train": audio_dataset.select(list(range(3000, len(audio_dataset))))}).push_to_hub(
210
+ # f"{hf_org}/{hf_dataset}",
211
+ # config_name=f"subset_{dataset_id}"
212
+ # )
213
+ #
214
+ #
215
+
216
+
217
+ # audio_dataset = audio_dataset.select(list(range(2500)))
218
+ # dataset_to_push = DatasetDict({"train": audio_dataset})
219
+ # repo_name = f"{hf_org}/{hf_dataset}"
220
+ # dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
221
+ # dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}", max_shard_size="2GiB")
222
+ # dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}", num_shards={"train": 1})
223
+
224
+ # while True:
225
+ # try:
226
+ # dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
227
+ # break
228
+ # except Exception:
229
+ # print(f"FAILED: push_to_hub on {repo_name} failed. wait 60 sec and retry soon...")
230
+ # time.sleep(60)
231
+
232
+
main.sh CHANGED
@@ -18,11 +18,15 @@ python -c 'n=1; import os; from glob import glob; tmp = [int(os.path.basename(i)
18
  python -c 'n=2; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/feature/enA-jaA/*.json")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
19
  python -c 'n=2; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/enA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
20
  python -c 'n=2; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/jaA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
21
- python -c 'n=10; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/feature/enA-jaA/*.json")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
22
- python -c 'n=10; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/enA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
23
- python -c 'n=10; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/jaA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
24
 
25
  python -c 'file_name="tmp.mp3"; from datasets import Audio; a=Audio(); wav=a.decode_example({"path": file_name, "bytes": None}); print(wav)'
 
 
 
 
26
  # test
27
  export DATASET_ID=test
28
  export DIRECTION="enA-jaA"
@@ -30,64 +34,86 @@ export LINE_NO_START=0
30
  export LINE_NO_END=10
31
  python download_audio.py
32
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  ####################
34
- # enA-jaA: 718_606 #
35
  ####################
36
- export N_POOL=1
37
- export DATASET_ID=1
38
- export DIRECTION="enA-jaA"
39
- export LINE_NO_START=$(((DATASET_ID-1) * 2500))
40
- export LINE_NO_END=$((DATASET_ID * 2500))
41
- echo ${LINE_NO_START}
42
  python download_audio.py
43
 
44
- export N_POOL=10
45
- export DATASET_ID=2
46
- export DIRECTION="enA-jaA"
47
- export LINE_NO_START=$(((DATASET_ID-1) * 2500))
48
- export LINE_NO_END=$((DATASET_ID * 2500))
49
- echo ${LINE_NO_START}
 
 
50
  python download_audio.py
51
 
52
- export N_POOL=10
53
- export DATASET_ID=10
54
- export DIRECTION="enA-jaA"
55
- export LINE_NO_START=$(((DATASET_ID-1) * 2500))
56
- export LINE_NO_END=$((DATASET_ID * 2500))
57
- echo ${LINE_NO_START}
 
 
58
  python download_audio.py
59
 
60
-
61
-
62
- export N_POOL=10
63
- export DATASET_ID=41
64
- export DIRECTION="enA-jaA"
65
- export LINE_NO_START=$(((DATASET_ID-1) * 2500))
66
- export LINE_NO_END=$((DATASET_ID * 2500))
67
- echo ${LINE_NO_START}
68
  python download_audio.py
69
 
70
- export N_POOL=10
71
- export DATASET_ID=42
72
- export DIRECTION="enA-jaA"
73
- export LINE_NO_START=$(((DATASET_ID-1) * 2500))
74
- export LINE_NO_END=$((DATASET_ID * 2500))
75
- echo ${LINE_NO_START}
 
 
76
  python download_audio.py
77
 
78
- export N_POOL=10
79
- export DATASET_ID=51
 
 
 
 
80
  export DIRECTION="enA-jaA"
81
- export LINE_NO_START=$(((DATASET_ID-1) * 2500))
82
- export LINE_NO_END=$((DATASET_ID * 2500))
83
- echo ${LINE_NO_START}
84
  python download_audio.py
85
 
86
- for i in $(seq 51 60);
 
 
87
  do
88
- export N_POOL=10
89
  export DATASET_ID=${i}
90
- export DIRECTION="enA-jaA"
91
  export LINE_NO_START=$(((DATASET_ID-1) * 2500))
92
  export LINE_NO_END=$((DATASET_ID * 2500))
93
  echo ${LINE_NO_START}
@@ -95,10 +121,6 @@ do
95
  done
96
 
97
 
98
- ######################
99
- # enA-jpn: 1_468_292 #
100
- ######################
101
- # DOWNLOAD AUDIO
102
  export DIRECTION="enA-jpn"
103
  export LINE_NO_START=0
104
  export LINE_NO_END=50000
 
18
  python -c 'n=2; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/feature/enA-jaA/*.json")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
19
  python -c 'n=2; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/enA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
20
  python -c 'n=2; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/jaA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
21
+ python -c 'n=3; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/feature/enA-jaA/*.json")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
22
+ python -c 'n=3; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/enA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
23
+ python -c 'n=3; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/jaA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
24
 
25
  python -c 'file_name="tmp.mp3"; from datasets import Audio; a=Audio(); wav=a.decode_example({"path": file_name, "bytes": None}); print(wav)'
26
+
27
+ ####################
28
+ # enA-jaA: 718_606 #
29
+ ####################
30
  # test
31
  export DATASET_ID=test
32
  export DIRECTION="enA-jaA"
 
34
  export LINE_NO_END=10
35
  python download_audio.py
36
 
37
+ # main
38
+ for i in $(seq 1 144);
39
+ do
40
+ export N_POOL=15
41
+ export DATASET_ID=${i}
42
+ export DIRECTION="enA-jaA"
43
+ export LINE_NO_START=$(((DATASET_ID-1) * 2500))
44
+ export LINE_NO_END=$((DATASET_ID * 2500))
45
+ echo ${LINE_NO_START}
46
+ python download_audio.py
47
+ done
48
+
49
  ####################
50
+ # enA-zhA: 1_289_192 #
51
  ####################
52
+ # test
53
+ export DATASET_ID=test
54
+ export DIRECTION="enA-zhA"
55
+ export LINE_NO_START=0
56
+ export LINE_NO_END=10
 
57
  python download_audio.py
58
 
59
+ ####################
60
+ # enA-viA: 740_598 #
61
+ ####################
62
+ # test
63
+ export DATASET_ID=test
64
+ export DIRECTION="enA-viA"
65
+ export LINE_NO_START=0
66
+ export LINE_NO_END=10
67
  python download_audio.py
68
 
69
+ ####################
70
+ # enA-koA: 511_358 #
71
+ ####################
72
+ # test
73
+ export DATASET_ID=test
74
+ export DIRECTION="enA-koA"
75
+ export LINE_NO_START=0
76
+ export LINE_NO_END=10
77
  python download_audio.py
78
 
79
+ ####################
80
+ # enA-hiA: #
81
+ ####################
82
+ # test
83
+ export DATASET_ID=test
84
+ export DIRECTION="enA-hiA"
85
+ export LINE_NO_START=0
86
+ export LINE_NO_END=10
87
  python download_audio.py
88
 
89
+ ####################
90
+ # enA-deA: 511_358 #
91
+ ####################
92
+ # test
93
+ export DATASET_ID=test
94
+ export DIRECTION="enA-frA"
95
+ export LINE_NO_START=0
96
+ export LINE_NO_END=10
97
  python download_audio.py
98
 
99
+
100
+ ######################
101
+ # enA-jpn: 1_468_292 #
102
+ ######################
103
+ # test
104
+ export DATASET_ID=test
105
  export DIRECTION="enA-jaA"
106
+ export LINE_NO_START=0
107
+ export LINE_NO_END=10
 
108
  python download_audio.py
109
 
110
+
111
+ # DOWNLOAD AUDIO
112
+ for i in $(seq 91 100);
113
  do
114
+ export N_POOL=15
115
  export DATASET_ID=${i}
116
+ export DIRECTION="enA-jpn"
117
  export LINE_NO_START=$(((DATASET_ID-1) * 2500))
118
  export LINE_NO_END=$((DATASET_ID * 2500))
119
  echo ${LINE_NO_START}
 
121
  done
122
 
123
 
 
 
 
 
124
  export DIRECTION="enA-jpn"
125
  export LINE_NO_START=0
126
  export LINE_NO_END=50000