cdminix committed (verified)
Commit a3969d9 · Parent(s): 2213256

add speaker name

Files changed (1):
  1. libriheavy.py +7 -0
libriheavy.py CHANGED
@@ -2,10 +2,12 @@ import json
 import gzip
 import os
 from pathlib import Path
+import re
 
 import datasets
 import numpy as np
 from tqdm import tqdm
+import requests
 
 logger = datasets.logging.get_logger(__name__)
 
@@ -117,6 +119,9 @@ class Libriheavy(datasets.GeneratorBasedBuilder):
             metadata_path = dl_manager.download_and_extract(metadata_path)
             with open(metadata_path, "r") as f:
                 speaker_metadata[speaker_id] = json.load(f)
+            speaker_name = requests.get(f"https://librivox.org/reader/{speaker_id}").text
+            speaker_name = re.findall("<h1>([^<>]+)</h1>", speaker_name)[0]
+            speaker_metadata[speaker_id]["name"] = speaker_name
             with open(f"{metadata_cache}/{speaker_id}.json", "w") as f:
                 json.dump(speaker_metadata[speaker_id], f)
 
@@ -127,6 +132,7 @@ class Libriheavy(datasets.GeneratorBasedBuilder):
             for chunk_id, chunk in metadata["chunks"].items():
                 chunk_dict = {
                     "speaker_id": speaker_id,
+                    "speaker_name": metadata["name"],
                     "id": f"{speaker_id}_{chunk_id}",
                     "audio": dl_manager.download(f"{PATH}/{speaker_id}/{chunk['npz'].replace('.gz', '')}"),
                     "text": dl_manager.download(f"{PATH}/{speaker_id}/{chunk['json']}"),
@@ -174,6 +180,7 @@ class Libriheavy(datasets.GeneratorBasedBuilder):
             result = {
                 "id": chunk["speaker_id"] + "_" + utterance_id,
                 "speaker_id": chunk["speaker_id"],
+                "speaker_name": chunk["speaker_name"],
                 "speaker_vec": npz_item["d_vector"][0],
                 "audio": chunk["audio"],
                 "text": chunk["text"],