asahi417 committed on
Commit
af7193a
·
1 Parent(s): a219aaf
Files changed (3) hide show
  1. delete_audio.py +1 -1
  2. download_audio.py +43 -19
  3. main.sh +2 -7
delete_audio.py CHANGED
@@ -9,7 +9,7 @@ cache_dir_feature = p_join("download", "feature", direction)
9
  line_no_start = int(os.getenv("LINE_NO_START", 0))
10
  line_no_end = int(os.getenv("LINE_NO_END", 10000))
11
  for i in tqdm(range(line_no_start, line_no_end), total=line_no_end-line_no_start):
12
- for audio_file in glob(p_join(cache_dir_audio, "*", "*")):
13
  os.remove(audio_file)
14
  if os.path.exists(p_join(cache_dir_feature, f"{i}.json")):
15
  os.remove(p_join(cache_dir_feature, f"{i}.json"))
 
9
  line_no_start = int(os.getenv("LINE_NO_START", 0))
10
  line_no_end = int(os.getenv("LINE_NO_END", 10000))
11
  for i in tqdm(range(line_no_start, line_no_end), total=line_no_end-line_no_start):
12
+ for audio_file in glob(p_join(cache_dir_audio, "*", f"*{i}*")):
13
  os.remove(audio_file)
14
  if os.path.exists(p_join(cache_dir_feature, f"{i}.json")):
15
  os.remove(p_join(cache_dir_feature, f"{i}.json"))
download_audio.py CHANGED
@@ -4,7 +4,6 @@ import tarfile
4
  import zipfile
5
  import gzip
6
  import subprocess
7
- import time
8
  from os.path import join as p_join
9
  from tqdm import tqdm
10
  from multiprocessing import Pool
@@ -37,7 +36,7 @@ line_no_end = int(os.getenv("LINE_NO_END", 10000))
37
  dataset_id = os.getenv("DATASET_ID", 0)
38
  hf_org = os.getenv("HF_ORG", "asahi417")
39
  hf_dataset = f"seamless-align-{direction}"
40
- skip_wget = bool(int(os.getenv("SKIP_WGET", 0)))
41
 
42
 
43
  def wget(url: str, output_file: Optional[str] = None):
@@ -127,21 +126,21 @@ def get_audio(dataframe: pd.DataFrame):
127
 
128
 
129
  if __name__ == '__main__':
130
- df_metadata = get_metadata()
131
- print(f"metadata: {len(df_metadata)}, {line_no_start} --> {line_no_end}")
132
- # inputs = [
133
- # g for line_no, g in df_metadata.groupby("line_no")
134
- # if line_no_start <= line_no < line_no_end and not os.path.exists(
135
- # p_join(cache_dir_feature, f'{int(line_no)}.json')
136
- # )
137
- # ]
138
- inputs = [g for line_no, g in df_metadata.groupby("line_no") if line_no_start <= line_no < line_no_end]
139
-
140
- print(f"filtered unique lines: {len(inputs)}")
141
- if direction == "enA-jaA":
142
- inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
143
- print(f"removed side != 2: {len(inputs)}")
144
  if not skip_wget:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
  if n_pool == 1:
146
  for g in tqdm(inputs, total=len(inputs)):
147
  line_no = get_audio(g)
@@ -166,9 +165,34 @@ if __name__ == '__main__':
166
  audio_dataset = Dataset.from_dict(data_dict)
167
  for side in sides:
168
  audio_dataset = audio_dataset.cast_column(f"{side}.audio", Audio())
169
- dataset_to_push = DatasetDict({"train": audio_dataset})
170
- repo_name = f"{hf_org}/{hf_dataset}"
171
- dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
  # while True:
173
  # try:
174
  # dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
 
4
  import zipfile
5
  import gzip
6
  import subprocess
 
7
  from os.path import join as p_join
8
  from tqdm import tqdm
9
  from multiprocessing import Pool
 
36
  dataset_id = os.getenv("DATASET_ID", 0)
37
  hf_org = os.getenv("HF_ORG", "asahi417")
38
  hf_dataset = f"seamless-align-{direction}"
39
+ skip_wget = bool(int(os.getenv("SKIP_WGET", 1)))
40
 
41
 
42
  def wget(url: str, output_file: Optional[str] = None):
 
126
 
127
 
128
  if __name__ == '__main__':
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
  if not skip_wget:
130
+ df_metadata = get_metadata()
131
+ print(f"metadata: {len(df_metadata)}, {line_no_start} --> {line_no_end}")
132
+ # inputs = [
133
+ # g for line_no, g in df_metadata.groupby("line_no")
134
+ # if line_no_start <= line_no < line_no_end and not os.path.exists(
135
+ # p_join(cache_dir_feature, f'{int(line_no)}.json')
136
+ # )
137
+ # ]
138
+ inputs = [g for line_no, g in df_metadata.groupby("line_no") if line_no_start <= line_no < line_no_end]
139
+ print(f"filtered unique lines: {len(inputs)}")
140
+ if direction == "enA-jaA":
141
+ inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
142
+ print(f"removed side != 2: {len(inputs)}")
143
+
144
  if n_pool == 1:
145
  for g in tqdm(inputs, total=len(inputs)):
146
  line_no = get_audio(g)
 
165
  audio_dataset = Dataset.from_dict(data_dict)
166
  for side in sides:
167
  audio_dataset = audio_dataset.cast_column(f"{side}.audio", Audio())
168
+ # DatasetDict({"train": audio_dataset}).push_to_hub(
169
+ # f"{hf_org}/{hf_dataset}",
170
+ # config_name=f"subset_{dataset_id}"
171
+ # )
172
+
173
+
174
+ DatasetDict({"train": audio_dataset.select(list(range(1000)))}).push_to_hub(
175
+ f"{hf_org}/{hf_dataset}",
176
+ config_name=f"subset_{dataset_id}"
177
+ )
178
+
179
+ # # 2 panel
180
+ # dataset_id = 75
181
+ DatasetDict({"train": audio_dataset.select(list(range(3000, len(audio_dataset))))}).push_to_hub(
182
+ f"{hf_org}/{hf_dataset}",
183
+ config_name=f"subset_{dataset_id}"
184
+ )
185
+ #
186
+ #
187
+
188
+
189
+ # audio_dataset = audio_dataset.select(list(range(2500)))
190
+ # dataset_to_push = DatasetDict({"train": audio_dataset})
191
+ # repo_name = f"{hf_org}/{hf_dataset}"
192
+ # dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
193
+ # dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}", max_shard_size="2GiB")
194
+ # dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}", num_shards={"train": 1})
195
+
196
  # while True:
197
  # try:
198
  # dataset_to_push.push_to_hub(repo_name, config_name=f"subset_{dataset_id}")
main.sh CHANGED
@@ -1,12 +1,7 @@
1
  ####################
2
  # enA-jaA: 718_606 #
3
  ####################
4
- export DATASET_ID=2
5
- export DIRECTION="enA-jaA"
6
- export LINE_NO_START=$(((DATASET_ID-1) * 5000))
7
- export LINE_NO_END=$((DATASET_ID * 5000))
8
- python download_audio.py
9
-
10
  export DATASET_ID=4
11
  export DIRECTION="enA-jaA"
12
  export LINE_NO_START=$(((DATASET_ID-1) * 5000))
@@ -76,7 +71,7 @@ export LINE_NO_START=$(((DATASET_ID-1) * 5000))
76
  export LINE_NO_END=$((DATASET_ID * 5000))
77
  python download_audio.py
78
 
79
- export DATASET_ID=7
80
  export DIRECTION="enA-jaA"
81
  export LINE_NO_START=$(((DATASET_ID-1) * 5000))
82
  export LINE_NO_END=$((DATASET_ID * 5000))
 
1
  ####################
2
  # enA-jaA: 718_606 #
3
  ####################
4
+ # TODO: 4, 6, 24, 25, 26
 
 
 
 
 
5
  export DATASET_ID=4
6
  export DIRECTION="enA-jaA"
7
  export LINE_NO_START=$(((DATASET_ID-1) * 5000))
 
71
  export LINE_NO_END=$((DATASET_ID * 5000))
72
  python download_audio.py
73
 
74
+ export DATASET_ID=25
75
  export DIRECTION="enA-jaA"
76
  export LINE_NO_START=$(((DATASET_ID-1) * 5000))
77
  export LINE_NO_END=$((DATASET_ID * 5000))