asahi417 committed
Commit e4e64be
1 Parent(s): 9217cd8
Files changed (1)
  1. download_audio.py +11 -9
download_audio.py CHANGED
@@ -29,11 +29,15 @@ def wget(url: str, cache_dir: str, filename: Optional[str] = None):
     output_file = p_join(cache_dir, filename)
     try:
         urllib.request.urlretrieve(url, output_file)
-    except (ConnectionError, KeyboardInterrupt, HTTPError):
+    except (ConnectionError, HTTPError):
         traceback.print_exc()
         if os.path.exists(output_file):
             os.remove(output_file)
         return False
+    except KeyboardInterrupt:
+        if os.path.exists(output_file):
+            os.remove(output_file)
+        exit()

     if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
         if output_file.endswith('.tar'):
@@ -56,19 +60,17 @@ def wget(url: str, cache_dir: str, filename: Optional[str] = None):


 def get_metadata(url: str):
-    cache_dir = p_join(cache_dir_root, "meta")
     filename = os.path.basename(url).replace(".gz", "")
     if not os.path.exists(filename):
-        assert wget(url, cache_dir=cache_dir)
-    df = pd.read_csv(p_join(cache_dir, filename), sep=r'[\t\s]', header=None)[[0, 2, 6, 9, 10, 11, 12]]
+        assert wget(url, cache_dir=cache_dir_metadata)
+    df = pd.read_csv(p_join(cache_dir_metadata, filename), sep=r'[\t\s]', header=None)[[0, 2, 6, 9, 10, 11, 12]]
     df.columns = ["id", "url", "text_lid_score", "laser_score", "direction", "side", "line_no"]
     return df


 def get_audio(url: str, filename: str):
-    cache_dir = p_join(cache_dir_root, "audio")
-    if not os.path.exists(p_join(cache_dir, filename)):
-        return wget(url, filename=filename, cache_dir=cache_dir)
+    if not os.path.exists(p_join(cache_dir_audio, filename)):
+        return wget(url, filename=filename, cache_dir=cache_dir_audio)
     return True


@@ -78,12 +80,12 @@ def process_dataset(url_metadata):
     inputs = [(
         r['url'], f"{r['id']}.{r['direction']}.{r['side']}.{os.path.basename(r['url'])}"
     ) for _, r in df_metadata.iterrows()]
-    inputs = [x for x in inputs if not os.path.exists(p_join(cache_dir_root, "audio", x[1]))]
+    inputs = [x for x in inputs if not os.path.exists(p_join(cache_dir_audio, x[1]))]
     print(f"{len(inputs)} urls to download")
     if n_pool == 1:
         for url, filename in tqdm(inputs, total=len(inputs)):
             flag = get_audio(url, filename)
-            if flag:
+            if not flag:
                 print(f"failed:\n{url}")
     else:
         with Pool(n_pool) as pool:
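
Taken together, the commit stops swallowing KeyboardInterrupt inside wget (the partial file is removed and the script exits, instead of returning False as if it were a network error) and replaces the per-function cache_dir locals with shared cache_dir_metadata / cache_dir_audio paths, which also fixes the inverted failure check in process_dataset. Below is a minimal sketch of the module-level setup those names presumably rely on; the definitions are assumptions, since they fall outside the hunks shown above (cache_dir_root already exists in the file, but its value is not visible here).

    # Sketch only, not part of the commit: presumed module-level cache paths.
    # The sub-folder names mirror the expressions the removed lines built inline,
    # i.e. p_join(cache_dir_root, "meta") and p_join(cache_dir_root, "audio").
    import os
    from os.path import join as p_join

    cache_dir_root = "./download"  # assumed placeholder; real value not shown in the diff
    cache_dir_metadata = p_join(cache_dir_root, "meta")
    cache_dir_audio = p_join(cache_dir_root, "audio")
    os.makedirs(cache_dir_metadata, exist_ok=True)
    os.makedirs(cache_dir_audio, exist_ok=True)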