|
|
|
|
|
from __future__ import annotations |
|
|
|
import os |
|
import gc |
|
import json |
|
import torch |
|
import torchaudio |
|
|
|
from time import perf_counter |
|
from datasets import Dataset, Features, Audio, Value, ClassLabel |
|
from typing import Sequence, Dict, Any, Iterable, Iterator, Optional, TYPE_CHECKING
|
|
|
if TYPE_CHECKING: |
|
|
|
from typing_extensions import Literal |
|
|
|
class RotatingAudioDirectoryData: |
|
""" |
|
    A helper class for reading audio data just-in-time from a directory, and
    maintaining an LRU-style cache of decoded tensors in memory.

    Also enforces a loose memory budget, evicting the least-recently accessed
    data when necessary. Accessing an item will transparently read it from disk
    when needed, and each access resets its place in the eviction queue.
|
""" |
|
data: Dict[str, torch.Tensor] |
|
data_sizes: Dict[str, int] |
|
    access_times: Dict[str, float]
|
sample_rate: int |
|
eviction_rate: float |
|
|
|
def __init__(self, directory: str, max_size: int, eviction_rate: float=0.25) -> None: |
|
""" |
|
:param directory: The directory to read data from. |
|
:param max_size: The maximum size of data to keep in memory. |
|
:param eviction_rate: The fraction of data to evict when full. |
|
""" |
|
self.directory = directory |
|
self.max_size = max_size |
|
self.file_names = [ |
|
f for f in os.listdir(directory) |
|
if f.endswith(".wav") |
|
and not f.startswith(".") |
|
] |
|
self.data = {} |
|
self.data_sizes = {} |
|
self.access_times = {} |
|
self.eviction_rate = eviction_rate |
|
self.sample_rate = 0 |
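        # Validate the directory, then prime the cache with the first file so the sample rate is known up front.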
|
        if not self.file_names:
            raise ValueError(f"No .wav files found in {directory}")
        self.read(self.file_names[0])
|
|
|
@property |
|
def size(self) -> int: |
|
""" |
|
:return: The total size of data in memory. |
|
""" |
|
return sum(self.data_sizes.values()) |
|
|
|
def evict(self) -> None: |
|
""" |
|
Evicts the least-recently accessed items from memory. |
|
|
|
This is made to be called infrequently, so it will evict up to the |
|
configured eviction rate (default 25%) of the total data. |
|
""" |
|
        # Evict at least one item so eviction always makes progress.
        num_to_evict = max(1, int(self.eviction_rate * len(self.data)))
        evict_keys = sorted(self.access_times, key=self.access_times.get)[:num_to_evict]
|
for key in evict_keys: |
|
del self.data[key] |
|
del self.data_sizes[key] |
|
del self.access_times[key] |
|
gc.collect() |
|
|
|
def check_evict(self, size: int) -> None: |
|
""" |
|
Checks if the new data will fit, and evicts if necessary. |
|
|
|
:param size: The size of the new data to add. |
|
""" |
|
if self.size + size > self.max_size: |
|
self.evict() |
|
|
|
def read(self, file_name: str) -> None: |
|
""" |
|
Reads a file from disk and stores it in memory. |
|
|
|
:param file_name: The name of the file to read. |
|
""" |
|
file_path = os.path.join(self.directory, file_name) |
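        # Rough in-memory size estimate: decoded float32 samples take roughly twice
        # the on-disk space, assuming the source wavs are 16-bit PCM.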
|
file_size = os.path.getsize(file_path) * 2 |
|
self.check_evict(file_size) |
|
|
|
try: |
|
data, sample_rate = torchaudio.load(file_path) |
|
except RuntimeError as e: |
|
            raise RuntimeError(f"Error reading file {file_path}: {e}") from e
|
|
|
if self.sample_rate == 0: |
|
self.sample_rate = sample_rate |
|
|
|
        assert self.sample_rate == sample_rate, f"Sample rate mismatch in {file_name}: expected {self.sample_rate}, got {sample_rate}"
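        # Each wav file is expected to contain a single channel; keep channel 0 only.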
|
|
|
self.data[file_name] = data[0] |
|
self.data_sizes[file_name] = file_size |
|
self.access_times[file_name] = perf_counter() |
|
|
|
def __getitem__(self, key: str) -> torch.Tensor: |
|
""" |
|
Gets an item from the data, reading it from disk if necessary. |
|
|
|
:param key: The key of the item to get. |
|
:return: The data corresponding to the key. |
|
""" |
|
if key not in self.data: |
|
if key not in self.file_names: |
|
raise KeyError(f"File {key} not found in directory") |
|
self.read(key) |
|
self.access_times[key] = perf_counter() |
|
return self.data[key] |
|
|
|
def maybe_use_tqdm( |
|
iterable: Iterable[Any], |
|
    desc: Optional[str] = None
|
) -> Iterator[Any]: |
|
""" |
|
Uses tqdm if available, otherwise iterates as-is. |
|
|
|
:param iterable: The iterable to iterate over. |
|
:param desc: The description to show in the progress bar. |
|
:return: The iterator over the iterable. |
|
""" |
|
    try:
        import tqdm
    except ImportError:
        yield from iterable
    else:
        yield from tqdm.tqdm(iterable, desc=desc)
|
|
|
def mix_audio(audio_to_mix: Sequence[torch.Tensor]) -> torch.Tensor: |
|
""" |
|
    Mixes multiple audio tensors together by stacking and averaging them.

    :param audio_to_mix: The audio tensors to mix; all must be the same length.
    :return: The averaged (mixed-down) audio tensor.
|
""" |
|
mixed_audio = torch.stack(audio_to_mix) |
|
return torch.mean(mixed_audio, dim=0) |
|
|
|
def get_seconds_from_timestamp(timestamp: str) -> float: |
|
""" |
|
Converts a timestamp string to seconds. |
|
Expects a timestamp of format `hh:mm:ss.ff` |
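    For example, "01:02:03.50" becomes 3723.5 seconds.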
|
|
|
:param timestamp: The timestamp string to convert. |
|
:return: The number of seconds represented by the timestamp. |
|
""" |
|
parts = timestamp.split(":") |
|
hours = int(parts[0]) |
|
minutes = int(parts[1]) |
|
seconds = float(parts[2]) |
|
return hours * 3600 + minutes * 60 + seconds |
|
|
|
def process_session_file( |
|
session_file: str, |
|
wav_data: RotatingAudioDirectoryData, |
|
channel_mode: Literal["split", "mixed"] = "split", |
|
) -> Iterable[Dict[str, Any]]: |
|
""" |
|
Processes a single session file. |
|
|
|
:param session_file: The path to the session file to process. |
|
:param wav_data: The audio data to use for the session. |
|
:param channel_mode: The channel mode to use for processing. |
|
:return: An iterator over the processed utterances. |
|
""" |
|
with open(session_file, "r") as f: |
|
session_data = json.load(f) |
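    # Distances in millimetres from each of the four seating positions (rows)
    # to each of the five far-field devices U01..U05 (columns).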
|
|
|
|
|
participant_position_device_distance_map = [ |
|
[1600, 2240, 3825, 2900, 1760], |
|
[1990, 2130, 3950, 3100, 1760], |
|
[1820, 1520, 2900, 2030, 2790], |
|
[1300, 1120, 3100, 2520, 2820] |
|
] |
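    # Which participants (zero-based IDs) sat at each of the four positions in
    # each session; the row index is the zero-based session ID.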
|
|
|
participant_session_positions = [ |
|
[ 0, 1, 2, 3], |
|
[ 4, 5, 6, 7], |
|
[ 8, 9, 10, 11], |
|
[12, 13, 14, 15], |
|
[16, 17, 18, 19], |
|
[20, 21, 22, 23], |
|
[20, 21, 22, 23], |
|
[24, 25, 26, 27], |
|
[28, 29, 30, 31], |
|
[28, 29, 30, 31] |
|
] |
|
|
|
    for utterance in maybe_use_tqdm(session_data, desc="Processing utterances"):
|
|
|
start_times = utterance["start_time"] |
|
end_times = utterance["end_time"] |
|
words = utterance["words"] |
|
gender = utterance["gender"] |
|
nativeness = utterance["nativeness"] |
|
mother_tongue = utterance["mother_tongue"] |
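        # Session and speaker labels are one-based (e.g. "S05", "P17"); convert them to zero-based indices.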
|
|
|
session_id_label = utterance["session_id"] |
|
session_id = int(session_id_label[1:]) - 1 |
|
|
|
participant_id_label = utterance["speaker_id"] |
|
participant_id = int(participant_id_label[1:]) - 1 |
|
|
|
|
|
participant_position_index = participant_session_positions[session_id].index(participant_id) |
|
participant_device_distances = participant_position_device_distance_map[participant_position_index] |
|
|
|
speaker_metadata = { |
|
"transcription": words, |
|
"participant_id": participant_id_label, |
|
"session_id": session_id_label, |
|
"gender": gender, |
|
"nativeness": nativeness, |
|
"mother_tongue": mother_tongue, |
|
} |
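        # Timestamps are keyed per recording device: "close-talk" for the speaker's
        # headset microphone, plus one key per far-field array device (e.g. "U01".."U05").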
|
|
|
|
|
|
|
for time_key in start_times.keys(): |
|
|
|
start_timestamp = start_times[time_key] |
|
start_time_s = get_seconds_from_timestamp(start_timestamp) |
|
start_frame = int(start_time_s * wav_data.sample_rate) |
|
|
|
end_timestamp = end_times[time_key] |
|
end_time_s = get_seconds_from_timestamp(end_timestamp) |
|
end_frame = int(end_time_s * wav_data.sample_rate) |
|
|
|
            utterance_metadata = {
                **speaker_metadata,
                "start_timestamp": start_timestamp,
                "start_time_s": start_time_s,
                "start_frame": start_frame,
                "end_timestamp": end_timestamp,
                "end_time_s": end_time_s,
                "end_frame": end_frame,
                "duration_s": end_time_s - start_time_s,
                "duration_frames": end_frame - start_frame,
            }
|
|
|
if time_key == "close-talk": |
|
|
|
device_metadata = { |
|
"device_type": "close-talk", |
|
"device_id": participant_id_label, |
|
"device_channel": 0, |
|
"device_distance_mm": 0, |
|
} |
|
wav_file_name = f"{session_id_label}_{participant_id_label}.wav" |
|
audio_array = wav_data[wav_file_name][start_frame:end_frame] |
|
yield { |
|
"audio": { |
|
"array": audio_array, |
|
"path": wav_file_name, |
|
"sampling_rate": wav_data.sample_rate, |
|
}, |
|
**device_metadata, |
|
**utterance_metadata, |
|
} |
|
else: |
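                # Far-field device keys look like "U03"; convert to a zero-based index
                # for the device-distance lookup.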
|
|
|
device_id = int(time_key[1:]) - 1 |
|
|
|
device_metadata = { |
|
"device_type": "far-field", |
|
"device_id": time_key, |
|
"device_distance_mm": participant_device_distances[device_id], |
|
} |
|
audio_to_mix = [] |
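                # Each far-field device records a 7-channel microphone array,
                # stored as one wav file per channel (CH1..CH7).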
|
|
|
for channel in range(7): |
|
wav_file_name = f"{session_id_label}_{time_key}.CH{channel+1}.wav" |
|
audio_array = wav_data[wav_file_name][start_frame:end_frame] |
|
|
|
if channel_mode == "split": |
|
|
|
yield { |
|
"audio": { |
|
"array": audio_array, |
|
"path": wav_file_name, |
|
"sampling_rate": wav_data.sample_rate, |
|
}, |
|
"device_channel": channel+1, |
|
**device_metadata, |
|
**utterance_metadata, |
|
} |
|
else: |
|
|
|
audio_to_mix.append(audio_array) |
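                # In "mixed" mode, average the collected channels into a single mono track.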
|
|
|
if channel_mode == "mixed": |
|
|
|
audio_array = mix_audio(audio_to_mix) |
|
yield { |
|
"audio": { |
|
"array": audio_array, |
|
"sampling_rate": wav_data.sample_rate, |
|
}, |
|
"device_channel": 0, |
|
**device_metadata, |
|
**utterance_metadata, |
|
} |
|
|
|
def process_split( |
|
dipco_path: str, |
|
dipco_split: str, |
|
channel_mode: Literal["split", "mixed"] = "split", |
|
max_memory_bytes: int = 16*1024**3, |
|
) -> Iterable[Dict[str, Any]]: |
|
""" |
|
Processes a split of the DiPCo dataset, iterating through all session files. |
|
|
|
:param dipco_path: The path to the DiPCo dataset. |
|
:param dipco_split: The split of the DiPCo dataset to process. |
|
:param channel_mode: The channel mode to use for processing. |
|
:param max_memory_bytes: The maximum memory to use for audio data. |
|
:return: An iterator over the processed utterances. |
|
:see: process_session_file |
|
""" |
|
dipco_path = os.path.abspath(dipco_path) |
|
wav_dir = os.path.join(dipco_path, "audio", dipco_split) |
|
wav_data = RotatingAudioDirectoryData(wav_dir, max_size=max_memory_bytes) |
|
transcriptions_dir = os.path.join(dipco_path, "transcriptions", dipco_split) |
|
session_filenames = [ |
|
f for f in os.listdir(transcriptions_dir) |
|
if f.endswith(".json") |
|
and not f.startswith(".") |
|
] |
|
|
|
    for session_filename in maybe_use_tqdm(session_filenames, desc="Processing session data"):
|
num_yielded = 0 |
|
for utterance in process_session_file( |
|
os.path.join(transcriptions_dir, session_filename), |
|
wav_data, |
|
channel_mode=channel_mode, |
|
): |
|
num_yielded += 1 |
|
yield utterance |
|
|
|
print(f"Parsed {num_yielded} utterances from {session_filename}") |
|
|
|
del wav_data |
|
gc.collect() |
|
|
|
def get_split_dataset( |
|
dipco_path: str, |
|
dipco_split: str, |
|
channel_mode: Literal["split", "mixed"] = "split", |
|
) -> Dataset: |
|
""" |
|
Gets a split of the DiPCo dataset as a Dataset object. |
|
|
|
:param dipco_path: The path to the DiPCo dataset. |
|
:param dipco_split: The split of the DiPCo dataset to process. |
|
:param channel_mode: The channel mode to use for processing. |
|
:return: The processed dataset. |
|
:see: process_split |
|
""" |
|
gen_kwargs = { |
|
"dipco_path": dipco_path, |
|
"dipco_split": dipco_split, |
|
"channel_mode": channel_mode, |
|
} |
|
|
|
return Dataset.from_generator( |
|
process_split, |
|
gen_kwargs=gen_kwargs, |
|
features=Features({ |
|
"audio": Audio(), |
|
"start_timestamp": Value(dtype="string"), |
|
"start_time_s": Value(dtype="float32"), |
|
"start_frame": Value(dtype="uint64"), |
|
"end_timestamp": Value(dtype="string"), |
|
"end_time_s": Value(dtype="float32"), |
|
"end_frame": Value(dtype="uint64"), |
|
"duration_s": Value(dtype="float32"), |
|
"duration_frames": Value(dtype="uint64"), |
|
"transcription": Value(dtype="string"), |
|
"mother_tongue": Value(dtype="string"), |
|
"participant_id": Value(dtype="string"), |
|
"session_id": Value(dtype="string"), |
|
"device_id": Value(dtype="string"), |
|
"device_channel": Value(dtype="uint8"), |
|
"device_distance_mm": Value(dtype="uint16"), |
|
"device_type": ClassLabel( |
|
num_classes=2, |
|
names=["close-talk", "far-field"] |
|
), |
|
"gender": ClassLabel( |
|
num_classes=2, |
|
names=["female", "male"] |
|
), |
|
"nativeness": ClassLabel( |
|
num_classes=2, |
|
names=["native", "non-native"] |
|
), |
|
}) |
|
) |
|
|
|
def synchronize_split( |
|
dipco_path: str, |
|
dipco_split: str, |
|
hub_path: str, |
|
hub_split: str, |
|
channel_mode: Literal["split", "mixed"] = "split", |
|
set_default: bool = False, |
|
) -> None: |
|
""" |
|
Synchronizes a split of the DiPCo dataset to hub. |
|
|
|
:param dipco_path: The path to the DiPCo dataset. |
|
:param dipco_split: The split of the DiPCo dataset to process. |
|
:param hub_path: The path to the hub dataset. |
|
:param hub_split: The split of the hub dataset to push to. |
|
:param channel_mode: The channel mode to use for processing. |
|
:param set_default: Whether to set the split as the default. |
|
:see: get_split_dataset |
|
""" |
|
dataset = get_split_dataset( |
|
dipco_path=dipco_path, |
|
dipco_split=dipco_split, |
|
channel_mode=channel_mode, |
|
) |
|
dataset.push_to_hub( |
|
hub_path, |
|
config_name=f"{channel_mode}-channel", |
|
split=hub_split, |
|
set_default=set_default, |
|
) |
|
|
|
|
|
if __name__ == "__main__": |
|
|
|
|
|
dipco_path = "./Dipco" |
|
hub_path = "benjamin-paine/dinner-party-corpus" |
|
channel_modes = ["split", "mixed"] |
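    # DiPCo provides "dev" and "eval" splits; publish them to the Hub as "train" and "test".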
|
split_maps = [("dev", "train"), ("eval", "test")] |
|
|
|
|
|
for i, channel_mode in enumerate(channel_modes): |
|
for j, (dipco_split, hub_split) in enumerate(split_maps): |
|
synchronize_split( |
|
dipco_path=dipco_path, |
|
dipco_split=dipco_split, |
|
hub_path=hub_path, |
|
hub_split=hub_split, |
|
channel_mode=channel_mode, |
|
                set_default=(i == 0 and j == 0),
|
) |
|
|