asahi417 committed on
Commit 4a04b75
1 Parent(s): 3622be8
download_audio.py DELETED
@@ -1,234 +0,0 @@
-import json
-import os
-import tarfile
-import zipfile
-import gzip
-import subprocess
-from os.path import join as p_join
-from math import ceil, floor
-from tqdm import tqdm
-from multiprocessing import Pool
-from typing import Optional, Dict
-from glob import glob
-# import librosa
-
-import pandas as pd
-import soundfile as sf
-from datasets import Dataset, Audio, DatasetDict
-
-audio_loader = Audio()
-# dataset config
-url_metadata_dict = {
-    "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
-    "enA-zhA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-zhA.tsv.gz",
-    "enA-viA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-viA.tsv.gz",
-}
-direction = os.getenv("DIRECTION", "enA-jaA")
-if direction not in url_metadata_dict:
-    a, b = direction.split("-")
-    url_metadata_dict[direction] = f"https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.{a}-{b}.tsv.gz"
-sides = set(direction.split("-"))
-cache_dir_audio = p_join("download", "audio", direction)
-cache_dir_feature = p_join("download", "feature", direction)
-os.makedirs(cache_dir_feature, exist_ok=True)
-for s in sides:
-    os.makedirs(p_join(cache_dir_audio, s), exist_ok=True)
-# processor config
-n_pool = int(os.getenv("N_POOL", 1))
-wget_max_retry = os.getenv("MAX_RETRY", "2")
-wget_timeout = os.getenv("TIMEOUT", "20")
-line_no_start = int(os.getenv("LINE_NO_START", 0))
-line_no_end = int(os.getenv("LINE_NO_END", 10000))
-dataset_id = os.getenv("DATASET_ID", 0)
-hf_org = os.getenv("HF_ORG", "asahi417")
-hf_dataset = f"seamless-align-{direction}"
-skip_download = bool(int(os.getenv("SKIP_DOWNLOAD", 0)))
-sampling_rate = 16000 # seamless-align aligns audio in 16kHz
-
-
-def wget(url: str, output_file: Optional[str] = None):
-    os.makedirs(os.path.dirname(output_file), exist_ok=True)
-    subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
-    if not os.path.exists(output_file):
-        return False
-    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
-        if output_file.endswith('.tar'):
-            tar = tarfile.open(output_file)
-        else:
-            tar = tarfile.open(output_file, "r:gz")
-        tar.extractall(os.path.dirname(output_file))
-        tar.close()
-        os.remove(output_file)
-    elif output_file.endswith('.gz'):
-        with gzip.open(output_file, 'rb') as f:
-            with open(output_file.replace('.gz', ''), 'wb') as f_write:
-                f_write.write(f.read())
-        os.remove(output_file)
-    elif output_file.endswith('.zip'):
-        with zipfile.ZipFile(output_file, 'r') as zip_ref:
-            zip_ref.extractall()
-        os.remove(output_file)
-    return True
-
-
-def get_metadata():
-    url_metadata = url_metadata_dict[direction]
-    meta_data_filename = os.path.basename(url_metadata)
-    meta_data_path = p_join("download", "meta", meta_data_filename)
-    if not os.path.exists(meta_data_path.replace(".gz", "")):
-        assert wget(url_metadata, output_file=meta_data_path)
-    df = pd.read_csv(meta_data_path.replace(".gz", ""), sep=r'[\t\s]', header=None)
-    df = df[[0, 2, 3, 4, 9, 10, 11, 12]]
-    df.columns = ["id", "url", "duration_start", "duration_end", "laser_score", "direction", "side", "line_no"]
-    if direction == "enA-jpn":
-        df = df[df["side"] == "enA"]
-    assert len(df["direction"].unique()) == 1
-    df.pop("direction")
-    return df.sort_values(by=["line_no", "side"])
-
-
-def to_json_serializable(val):
-    if "float" in str(type(val)):
-        return float(val)
-    if "int" in str(type(val)):
-        return int(val)
-    return str(val)
-
-
-def cleanup(features, feature_file):
-    if os.path.exists(feature_file):
-        os.remove(feature_file)
-    for _side in sides:
-        for _unrelated_audio_file in glob(p_join(cache_dir_audio, _side, f"{features['line_no']}.*")):
-            os.remove(_unrelated_audio_file)
-    # create a dummy so that we can skip from next run
-    with open(feature_file, "w") as f:
-        json.dump({"dummy": "dummy"}, f)
-
-
-def get_audio(dataframe: pd.DataFrame):
-    resampler = {}
-    features = {"line_no": int(dataframe.pop('line_no').values[0])}
-    feature_file = p_join(cache_dir_feature, f'{features["line_no"]}.json')
-    for side, df in dataframe.groupby("side"):
-        df.pop("side")
-        features.update({f"{side}.{k}": to_json_serializable(v) for k, v in df.iloc[0].to_dict().items()})
-        identifier = os.path.basename(features[f"{side}.url"]).split(".")[-1]
-        features[f"{side}.path"] = str(p_join(cache_dir_audio, side, f"{features['line_no']}.{identifier}"))
-        start, end = features[f"{side}.duration_start"], features[f"{side}.duration_end"]
-        if not os.path.exists(features[f"{side}.path"]):
-            print(f"WGET {features[f'{side}.url']}")
-            flag = wget(features[f"{side}.url"], output_file=features[f"{side}.path"])
-            if not flag:
-                print("\n#### ERROR: wget failure ####\n")
-                cleanup(features, feature_file)
-                return None
-            else:
-                try:
-                    print(f"LOAD AUDIO FROM {features[f'{side}.path']}")
-                    wav, sr = sf.read(features[f"{side}.path"])
-                    print(f"wav shape:{wav.shape}")
-                    if wav.ndim > 1:
-                        wav = wav[:, 0]
-                    wav = wav[floor(start / sampling_rate * sr):ceil(end / sampling_rate * sr)]
-                    print(f"wav shape (after truncate):{wav.shape}")
-                    wav = wav[:int(end/sampling_rate * sr) + sr]
-                    print(f"SAVING: {features[f'{side}.path']}")
-                    sf.write(features[f"{side}.path"], wav, sr)
-                    # if sr != sampling_rate:
-                    # print(f"RESAMPLING: {wav.shape} length audio")
-                    # wav = librosa.resample(wav, orig_sr=sr, target_sr=sampling_rate)
-                    # sf.write(features[f"{side}.path"], wav[start:end], sampling_rate)
-
-                except Exception as e:
-                    print(f"\n#### ERROR ####\n {e}")
-                    cleanup(features, feature_file)
-                    return None
-    print(f"\n### SUCCESS! ###\n:{features['line_no']}")
-    with open(feature_file, "w") as f:
-        json.dump(features, f)
-    return features["line_no"]
-
-
-def loader(feature: str) -> Dict:
-    with open(feature) as f_reader:
-        return json.load(f_reader)
-
-
-if __name__ == '__main__':
-    if not skip_download:
-        df_metadata = get_metadata()
-        print(f"metadata: {len(df_metadata)}, {line_no_start} --> {line_no_end}")
-        inputs = [
-            g for line_no, g in df_metadata.groupby("line_no")
-            if line_no_start <= line_no < line_no_end and not os.path.exists(
-                p_join(cache_dir_feature, f'{int(line_no)}.json')
-            )
-        ]
-        print(f"filtered unique lines: {len(inputs)}")
-        if direction == "enA-jaA":
-            inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
-            print(f"removed side != 2: {len(inputs)}")
-
-        if n_pool == 1:
-            for g in tqdm(inputs, total=len(inputs)):
-                line_no = get_audio(g)
-        else:
-            with Pool(n_pool) as pool:
-                for line_no in pool.imap_unordered(get_audio, inputs):
-                    if line_no:
-                        print(line_no)
-
-    print("UPLOADING TO HF!!!")
-    features = [p_join(cache_dir_feature, f'{i}.json') for i in range(line_no_start, line_no_end)]
-    print(f"- raw feature: {len(features)}")
-    features = [i for i in features if os.path.exists(i)]
-    print(f"- path exists: {len(features)}")
-    features = [loader(i) for i in features]
-    features = [i for i in features if "dummy" not in i]
-    print(f"- dummy removed: {len(features)}")
-    print(f"push {len(features)} records to hub")
-    data_dict = {}
-    for side in sides:
-        data_dict.update({f"{side}.audio": [i.pop(f"{side}.path") for i in features]})
-    data_dict.update({k: [i[k] for i in features] for k in features[0].keys()})
-    audio_dataset = Dataset.from_dict(data_dict)
-    for side in sides:
-        audio_dataset = audio_dataset.cast_column(f"{side}.audio", Audio())
-    DatasetDict({"train": audio_dataset}).push_to_hub(
-        f"{hf_org}/{hf_dataset}",
-        config_name=f"subset_{dataset_id}"
-    )
-
-
-# DatasetDict({"train": audio_dataset.select(list(range(1000)))}).push_to_hub(
-# f"{hf_org}/{hf_dataset}",
-# config_name=f"subset_{dataset_id}"
-# )
-
-# # 2 panel
-# dataset_id = 75
-# DatasetDict({"train": audio_dataset.select(list(range(3000, len(audio_dataset))))}).push_to_hub(
-# f"{hf_org}/{hf_dataset}",
-# config_name=f"subset_{dataset_id}"
-# )
-#
-#
-
-
-# audio_dataset = audio_dataset.select(list(range(2500)))
-# dataset_to_push = DatasetDict({"train": audio_dataset})
-# repo_name = f"{hf_org}/{hf_dataset}"
-# dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
-# dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}", max_shard_size="2GiB")
-# dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}", num_shards={"train": 1})

-# while True:
-# try:
-# dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
-# break
-# except Exception:
-# print(f"FAILED: push_to_hub on {repo_name} failed. wait 60 sec and retry soon...")
-# time.sleep(60)
-
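Note: the deleted script was configured entirely through environment variables read at import time (os.getenv calls near the top of the file). A minimal invocation sketch, where the variable names are the ones the script reads and the concrete values are illustrative only:

export DIRECTION="enA-jaA"    # language pair; also selects which metadata TSV is fetched
export N_POOL=8               # number of parallel download workers
export LINE_NO_START=0        # first metadata line to process (inclusive)
export LINE_NO_END=2500       # last metadata line to process (exclusive)
export DATASET_ID=1           # suffix of the subset_<id> config pushed to the Hub
python download_audio.py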
encodec_audio_tokenizer.py ADDED
@@ -0,0 +1,123 @@
+"""https://github.com/facebookresearch/audiocraft/blob/main/audiocraft/models/multibanddiffusion.py"""
+import logging
+from typing import Optional, List
+from math import ceil
+import torch
+import julius
+
+from tqdm import tqdm
+from audiocraft.models.encodec import CompressionModel
+from audiocraft.solvers.compression import CompressionSolver
+
+
+class BaseEncodecTokenizer:
+
+    def __init__(self,
+                 codec_model: CompressionModel,
+                 sample_per_token: int = 320,
+                 num_codebooks_encoder: Optional[int] = None) -> None:
+        """Base class for multi-band diffusion.
+        Args:
+            codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.
+            sample_per_token (int): Number of sample per token (320 for 24kHz encodec).
+            num_codebooks_encoder (int): Number of codebook to use for encoder (default full code).
+        """
+        self.codec_model = codec_model
+        self.device = next(self.codec_model.parameters()).device
+        self.sample_per_token = sample_per_token
+        self.num_codebooks_encoder = num_codebooks_encoder
+
+    @property
+    def sample_rate(self) -> int:
+        return self.codec_model.sample_rate
+
+    @torch.no_grad()
+    def wav_to_tokens(self,
+                      wav: torch.Tensor,
+                      sample_rate: List[int],
+                      cpu_offload: bool = True,
+                      chunk_length: Optional[int] = None,
+                      stride: Optional[int] = None,
+                      concat_strategy: str = "first") -> torch.Tensor:
+        """Get audio tokens from waveform in batch. Note that Encodec generates 75 tokens per second of audio at 24 kHz
+        meaning 320 samples (13.333 msec) per tokens.
+        Args:
+            wav (torch.Tensor): The audio that we want to extract the conditioning from (batch, channel, wav).
+            sample_rate (int): Sample rate of the audio.
+            cpu_offload (bool): Move the output tokens to cpu on the fly to save cuda memory.
+            chunk_length (int): Chunk length to split a long audio (sample size, must be divisible by sample_per_token).
+            stride (int): Stride over chunked audio (sample size, must be divisible by sample_per_token).
+            concat_strategy (str): "first" or "last" to indicate which chunk to use when consolidating the overlap.
+        """
+        # sanity check
+        if wav.ndim != 3:
+            raise ValueError(f"wav should be (batch, channel, time): {wav.ndim} dims")
+        original_device = wav.device
+        # sampling audio
+        assert len(sample_rate) == len(wav)
+        new_wav = []
+        for sr, single_wav in zip(sample_rate, wav):
+            if sr != self.sample_rate:
+                single_wav = julius.resample_frac(single_wav, sr, self.sample_rate)
+            new_wav.append(single_wav)
+        wav = torch.concat(new_wav)
+        batch_size, channels, input_length = wav.shape
+        if channels > 1:
+            logging.warning("Audio has more than one channel but encoder takes the first channel only.")
+        # validate chunk length and stride (if None, do one-shot process)
+        if chunk_length:
+            if chunk_length % self.sample_per_token != 0:
+                raise ValueError(f"chunk_length must be divisible by {self.sample_per_token}: {chunk_length}")
+        else:
+            chunk_length = input_length
+        chunk_length_latent = ceil(chunk_length / self.sample_per_token)
+        if stride:
+            if stride % self.sample_per_token != 0:
+                raise ValueError(f"stride must be divisible by {self.sample_per_token}: {stride}")
+        else:
+            stride = chunk_length
+        stride_latent = ceil(stride / self.sample_per_token)
+        # initialize the token tensor
+        num_tokens = ceil(input_length / self.sample_per_token)
+        num_filters = self.codec_model.model.config.num_filters
+        if self.num_codebooks_encoder is not None:
+            if self.num_codebooks_encoder > num_filters:
+                raise ValueError(f"num_codebooks_encoder must be smaller than {num_filters}")
+            num_filters = self.num_codebooks_encoder
+        tokens = torch.zeros(
+            (batch_size, num_filters, num_tokens),
+            device="cpu" if cpu_offload else original_device,
+            dtype=torch.int64
+        )
+        # tokenize by chunk in a sequential manner
+        for offset in tqdm(list(range(0, input_length - chunk_length + stride, stride))):
+            frame = wav[:, :1, offset: offset + chunk_length]
+            tmp_tokens, _ = self.codec_model.encode(frame.to(self.device))
+            offset_latent = int(offset / self.sample_per_token)
+            tmp_tokens = tmp_tokens.to("cpu") if cpu_offload else tmp_tokens.to(original_device)
+            if concat_strategy == "last" or offset == 0:
+                tokens[:, :, offset_latent: offset_latent + chunk_length_latent] = tmp_tokens[:, :num_filters, :]
+            else:
+                overlap_token = chunk_length_latent - stride_latent
+                tokens[:, :, offset_latent + overlap_token: offset_latent + chunk_length_latent] \
+                    = tmp_tokens[:, :num_filters, overlap_token:]
+        return tokens
+
+
+class EncodecTokenizer:
+
+    @staticmethod
+    def from_pretrained(num_codebooks_encoder: Optional[int] = None) -> BaseEncodecTokenizer:
+        """Get the pretrained Models for MultiBandDiffusion.
+        Args:
+            num_codebooks_encoder (int): Number of codebook to use for encoder (default full code).
+        """
+        device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        codec_model = CompressionSolver.model_from_checkpoint(
+            '//pretrained/facebook/encodec_24khz', device=device
+        )
+        codec_model = codec_model.to(device)
+        return BaseEncodecTokenizer(
+            codec_model=codec_model,
+            num_codebooks_encoder=num_codebooks_encoder
+        )
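Note: the new module imports audiocraft (CompressionModel / CompressionSolver) and julius (resampling), while the same commit deletes requirements.txt. A hedged dependency sketch, assuming the packages are installed from PyPI under their published names and leaving versions unpinned:

pip install torch audiocraft julius tqdm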
main_s2s.sh CHANGED
@@ -19,16 +19,6 @@ do
 python fetch_dataset_s2s.py
 done

-######################
-# enA-zhA: 1_289_192 #
-######################
-# test
-export DATASET_ID=test
-export DIRECTION="enA-zhA"
-export LINE_NO_START=0
-export LINE_NO_END=10
-python fetch_dataset_s2s.py
-
 ####################
 # enA-viA: 740_598 #
 ####################
@@ -49,6 +39,16 @@ do
 echo ${LINE_NO_START}
 python fetch_dataset_s2s.py
 done
+for i in $(seq 41 80);
+do
+export N_POOL=15
+export DATASET_ID=${i}
+export DIRECTION="enA-viA"
+export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+export LINE_NO_END=$((DATASET_ID * 2500))
+echo ${LINE_NO_START}
+python fetch_dataset_s2s.py
+done

 ####################
 # enA-koA: 511_358 #
@@ -59,6 +59,27 @@ export DIRECTION="enA-koA"
 export LINE_NO_START=0
 export LINE_NO_END=10
 python fetch_dataset_s2s.py
+# main
+for i in $(seq 1 50);
+do
+export N_POOL=15
+export DATASET_ID=${i}
+export DIRECTION="enA-koA"
+export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+export LINE_NO_END=$((DATASET_ID * 2500))
+echo ${LINE_NO_START}
+python fetch_dataset_s2s.py
+done
+for i in $(seq 51 100);
+do
+export N_POOL=15
+export DATASET_ID=${i}
+export DIRECTION="enA-koA"
+export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+export LINE_NO_END=$((DATASET_ID * 2500))
+echo ${LINE_NO_START}
+python fetch_dataset_s2s.py
+done

 ####################
 # enA-hiA: 454_942 #
@@ -69,6 +90,48 @@ export DIRECTION="enA-hiA"
 export LINE_NO_START=0
 export LINE_NO_END=10
 python fetch_dataset_s2s.py
+# main
+for i in $(seq 1 50);
+do
+export N_POOL=15
+export DATASET_ID=${i}
+export DIRECTION="enA-hiA"
+export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+export LINE_NO_END=$((DATASET_ID * 2500))
+echo ${LINE_NO_START}
+python fetch_dataset_s2s.py
+done
+for i in $(seq 51 91);
+do
+export N_POOL=15
+export DATASET_ID=${i}
+export DIRECTION="enA-hiA"
+export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+export LINE_NO_END=$((DATASET_ID * 2500))
+echo ${LINE_NO_START}
+python fetch_dataset_s2s.py
+done
+
+######################
+# enA-zhA: 1_289_192 #
+######################
+# test
+export DATASET_ID=test
+export DIRECTION="enA-zhA"
+export LINE_NO_START=0
+export LINE_NO_END=10
+python fetch_dataset_s2s.py
+# main
+for i in $(seq 1 100);
+do
+export N_POOL=15
+export DATASET_ID=${i}
+export DIRECTION="enA-viA"
+export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+export LINE_NO_END=$((DATASET_ID * 2500))
+echo ${LINE_NO_START}
+python fetch_dataset_s2s.py
+done

 ######################
 # enA-frA: 3_054_258 #
@@ -79,6 +142,17 @@ export DIRECTION="enA-frA"
 export LINE_NO_START=0
 export LINE_NO_END=10
 python fetch_dataset_s2s.py
+# main
+for i in $(seq 1 100);
+do
+export N_POOL=15
+export DATASET_ID=${i}
+export DIRECTION="enA-viA"
+export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+export LINE_NO_END=$((DATASET_ID * 2500))
+echo ${LINE_NO_START}
+python fetch_dataset_s2s.py
+done

 ######################
 # enA-esA: 2_658_022 #
main_s2t.sh CHANGED
@@ -53,7 +53,7 @@ for i in $(seq 1 ${CHUNK_SIZE});
 do
 cat seamless.dataset.metadata.public.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.withduration.reordered.batch_${i}.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee metadata.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.batch_1.tsv &
 done
-python format_text.py
+#python format_text.py

 ########
 # NLLB #
requirements.txt DELETED
@@ -1,6 +0,0 @@
-datasets
-soundfile
-librosa
-requests
-pandas
-julius
tokenize_dataset_s2s.py ADDED
@@ -0,0 +1,38 @@
+import os
+
+import torch
+from datasets import load_dataset, DatasetDict
+from encodec_audio_tokenizer import EncodecTokenizer
+
+
+direction = os.getenv("DIRECTION", "enA-jaA")
+sides = set(direction.split("-"))
+dataset_id = os.getenv("DATASET_ID", 0)
+batch_size = int(os.getenv("BATCH_SIZE", 64))
+num_proc = int(os.getenv("NUM_PROC", 1))
+hf_org = os.getenv("HF_ORG", "asahi417")
+hf_dataset = f"seamless-align-{direction}"
+dataset = load_dataset(f"{hf_org}/{hf_dataset}", config_name=f"subset_{dataset_id}", split="train")
+tokenizer = EncodecTokenizer.from_pretrained()
+
+
+def tokenize(batch):
+    for side in sides:
+        wav = torch.concat([i["array"] for i in batch[f"{side}.audio"]])
+        sr = [i["sampling_rate"] for i in batch[f"{side}.audio"]]
+        batch[f"{side}.audio.tokens"] = tokenizer.wav_to_tokens(wav=wav, sample_rate=sr).numpy().tolist()
+    return batch
+
+
+dataset = dataset.map(
+    function=tokenize,
+    remove_columns=[f"{s}.audio" for s in sides] + [f"{s}.url" for s in sides] + [f"{s}.duration_start" for s in sides] + [f"{s}.duration_end" for s in sides],
+    batched=True,
+    batch_size=batch_size,
+    num_proc=num_proc,
+    desc="tokenize dataset"
+)
+DatasetDict({"train": dataset}).push_to_hub(
+    f"{hf_org}/{hf_dataset}.tokenized",
+    config_name=f"subset_{dataset_id}"
+)
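Note: like the deleted download script, the new tokenization step is driven by environment variables. An illustrative run, where the variable names match the os.getenv calls above and the values are examples only:

export DIRECTION="enA-jaA"
export DATASET_ID=1       # which subset_<id> config of the audio dataset to tokenize
export BATCH_SIZE=64      # rows handed to wav_to_tokens per batched map() call
export NUM_PROC=1         # datasets.map worker processes
python tokenize_dataset_s2s.py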