Commit 4efb4c... asahi417 committed
Commit
4fefb4c
1 Parent(s): 81a79fe
Files changed (5)
  1. download_audio.py +4 -2
  2. main.sh +1 -1
  3. push_s2s_translation.py +112 -0
  4. requirements.txt +3 -0
  5. upload_audio.py +0 -31
download_audio.py CHANGED
@@ -84,9 +84,11 @@ def get_audio(dataframe: pd.DataFrame):
     for side, df in dataframe.groupby("side"):
         df.pop("side")
         features.update({f"{side}.{k}": to_json_serializable(v) for k, v in df.iloc[0].to_dict().items()})
-        features[f"{side}.path"] = str(p_join(cache_dir_audio, os.path.basename(features[f"{side}.url"])))
+        identifier = os.path.basename(features[f"{side}.url"]).split(".")[-1]
+        features[f"{side}.path"] = str(p_join(cache_dir_audio, side, f"{features['line_no']}.{identifier}"))
         if not os.path.exists(features[f"{side}.path"]):
-            if not wget(features[f"{side}.url"], output_file=features[f"{side}.path"]):
+            flag = wget(features[f"{side}.url"], output_file=features[f"{side}.path"])
+            if not flag:
                 return False
     with open(p_join(cache_dir_feature, f'{features["line_no"]}.json'), "w") as f:
         json.dump(features, f)
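
The rewritten hunk stops naming downloads after the URL basename and instead stores each clip under a per-side directory keyed by the dataset line number. A minimal sketch of that naming scheme, using a hypothetical URL and line_no (in the real script both come from the dataframe passed to get_audio, and cache_dir_audio mirrors the download/audio/<direction> layout used elsewhere in the repo):

import os
from os.path import join as p_join

cache_dir_audio = p_join("download", "audio", "enA-jaA")  # assumed layout, as in the other scripts
features = {"line_no": 42, "enA.url": "https://example.com/clips/abc123.mp3"}  # hypothetical sample

side = "enA"
# keep only the extension of the remote file ...
identifier = os.path.basename(features[f"{side}.url"]).split(".")[-1]
# ... and name the local copy after the line number, grouped by side
features[f"{side}.path"] = str(p_join(cache_dir_audio, side, f"{features['line_no']}.{identifier}"))
print(features[f"{side}.path"])  # download/audio/enA-jaA/enA/42.mp3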
main.sh CHANGED
@@ -2,7 +2,7 @@
 # enA-jaA: 718_606 #
 ####################
 export DIRECTION="enA-jaA"
-export MAX_RETRY=10
+
 export LINE_NO_START=0
 export LINE_NO_END=50000
 python download_audio.py
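
main.sh now only exports DIRECTION, LINE_NO_START and LINE_NO_END; MAX_RETRY is dropped. The Python scripts pick these variables up through os.getenv, as in this small sketch (defaults mirror the ones used in push_s2s_translation.py below):

import os

# slice of the alignment manifest to process, taken from the environment set in main.sh
direction = os.getenv("DIRECTION", "enA-jaA")
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 10000))
print(direction, line_no_start, line_no_end)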
push_s2s_translation.py ADDED
@@ -0,0 +1,112 @@
+import json
+import os
+import time
+from os.path import join as p_join
+from tqdm import tqdm
+from typing import Dict
+from glob import glob
+
+from soundfile import LibsndfileError
+from datasets import Dataset, Audio, DatasetDict
+
+# dataset config
+direction = os.getenv("DIRECTION", "enA-jaA")
+sides = {i: n for n, i in enumerate(sorted(direction.split("-")), 1)}
+sides_rev = {v: k for k, v in sides.items()}
+cache_dir_audio = p_join("download", "audio", direction)
+cache_dir_feature = p_join("download", "feature", direction)
+os.makedirs(cache_dir_audio, exist_ok=True)
+os.makedirs(cache_dir_feature, exist_ok=True)
+line_no_start = int(os.getenv("LINE_NO_START", 0))
+line_no_end = int(os.getenv("LINE_NO_END", 10000))
+dataset_id = int(os.getenv("DATASET_ID", 0))
+hf_org = "kotoba-tech"
+hf_dataset = f"seamless-align-{direction}-{dataset_id}"
+
+
+def loader(feature: str) -> Dict:
+    with open(feature) as f:
+        return json.load(f)
+
+
+# create a dataset instance
+
+files = {
+    int(os.path.basename(i).replace(".json", "")): i for i in glob(p_join(cache_dir_feature, "*.json"))
+}
+file_ids = [i for i in range(line_no_start, line_no_end) if i in files]
+features = [loader(files[i]) for i in file_ids]
+print(f"features: {len(features)}")
+features = [i for i in features if os.path.exists(i[f"{sides_rev[1]}.path"]) and os.path.exists(i[f"{sides_rev[2]}.path"])]
+print(f"features (filtered): {len(features)}")
+data_dict = {
+    f"{sides_rev[1]}.audio": [i.pop(f"{sides_rev[1]}.path") for i in features],
+    f"{sides_rev[2]}.audio": [i.pop(f"{sides_rev[2]}.path") for i in features]
+}
+keys = features[0].keys()
+data_dict.update(
+    {k: [i[k] for i in features] for k in keys}
+)
+audio_dataset = Dataset.from_dict(data_dict)
+audio_dataset = audio_dataset.cast_column(f"{sides_rev[1]}.audio", Audio())
+audio_dataset = audio_dataset.cast_column(f"{sides_rev[2]}.audio", Audio())
+
+# remove instances with broken audio files
+broken_files = []
+for i in tqdm(range(len(audio_dataset))):
+    try:
+        a = audio_dataset[i]
+        flag = True
+        for side_id in sides_rev.keys():
+            start = a[f"{sides_rev[side_id]}.duration_start"]
+            end = a[f"{sides_rev[side_id]}.duration_end"]
+            array = a[f"{sides_rev[side_id]}.audio"]["array"]
+            flag = 0 < start < end < len(array)
+        if not flag:
+            broken_files.append(i)
+    except LibsndfileError:
+        broken_files.append(i)
+        continue
+print(f"features (removed broken audio): {len(audio_dataset) - len(broken_files)}")
+
+# remove broken files
+for i in broken_files:
+    if os.path.exists(files[file_ids[i]]):
+        os.remove(files[file_ids[i]])
+    for side_id in sides_rev.keys():
+        if os.path.exists(data_dict[f"{sides_rev[side_id]}.audio"][i]):
+            os.remove(data_dict[f"{sides_rev[side_id]}.audio"][i])
+valid_data_id = [i for i in range(len(audio_dataset)) if i not in broken_files]
+audio_dataset_valid = audio_dataset.select(valid_data_id)
+
+
+# trim the audio according to the duration
+def clip_audio(batch):
+    for side_id in sides_rev.keys():
+        start = batch[f"{sides_rev[side_id]}.duration_start"]
+        end = batch[f"{sides_rev[side_id]}.duration_end"]
+        audio = batch[f"{sides_rev[side_id]}.audio"]
+        batch[f"{sides_rev[side_id]}.audio"] = [
+            {"array": a["array"][s:e], "sampling_rate": a["sampling_rate"]}
+            for a, s, e in zip(audio, start, end)
+        ]
+    return batch
+
+
+audio_dataset_valid = audio_dataset_valid.map(
+    function=clip_audio,
+    batched=True,
+    batch_size=128,
+    num_proc=1,
+    desc="clipping audio based on the duration:"
+)
+
+dataset_to_push = DatasetDict({"train": audio_dataset_valid})
+repo_name = f"{hf_org}/{hf_dataset}"
+while True:
+    try:
+        dataset_to_push.push_to_hub(repo_name)
+        break
+    except Exception:
+        print(f"FAILED: push_to_hub on {repo_name} failed. wait 60 sec and retry soon...")
+        time.sleep(60)
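
Once push_to_hub succeeds, the uploaded split can be read back with datasets.load_dataset. A hedged usage sketch, assuming DIRECTION=enA-jaA and DATASET_ID=0 so the repo name resolves to kotoba-tech/seamless-align-enA-jaA-0; the column names follow the sides mapping above:

from datasets import load_dataset

# assumes the push above completed for direction "enA-jaA" with dataset_id 0
dataset = load_dataset("kotoba-tech/seamless-align-enA-jaA-0", split="train")
sample = dataset[0]
# each side exposes a decoded Audio feature with "array" and "sampling_rate"
print(sample["enA.audio"]["sampling_rate"], len(sample["enA.audio"]["array"]))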
requirements.txt CHANGED
@@ -1,2 +1,5 @@
+datasets
+soundfile
+librosa
 requests
 pandas
upload_audio.py DELETED
@@ -1,31 +0,0 @@
-import json
-import os
-import tarfile
-import zipfile
-import gzip
-import subprocess
-from os.path import join as p_join
-from tqdm import tqdm
-from multiprocessing import Pool
-from typing import Optional
-
-import pandas as pd
-
-from datasets import Dataset, Audio
-
-# dataset config
-direction = os.getenv("DIRECTION", "enA-jaA")
-sides = set(direction.split("-"))
-cache_dir_audio = p_join("download", "audio", direction)
-cache_dir_feature = p_join("download", "feature", direction)
-os.makedirs(cache_dir_audio, exist_ok=True)
-os.makedirs(cache_dir_feature, exist_ok=True)
-# processor config
-n_pool = int(os.getenv("N_POOL", 8))
-wget_max_retry = os.getenv("MAX_RETRY", "1")
-wget_timeout = os.getenv("TIMEOUT", "20")
-line_no_start = int(os.getenv("LINE_NO_START", 0))
-line_no_end = int(os.getenv("LINE_NO_END", 10000))
-
-audio_dataset = Dataset.from_dict({"audio": ["path/to/audio_1", "path/to/audio_2", ..., "path/to/audio_n"]}).cast_column("audio", Audio())
-audio_dataset[0]["audio"]