import torch
import torchaudio
import time
import os
import numpy as np
import json
from datasets import load_dataset, Audio
from snac import SNAC
from torch.nn import functional as F
from tqdm import tqdm
import wandb
# Constants
SNAC_SAMPLE_RATE = 24000
OUTPUT_DIR = "processed_common_voice"
BATCH_SIZE = 1000  # number of processed rows buffered before each JSONL flush

# Use CUDA if available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def load_snac_model(sample_rate):
    """Load the pretrained SNAC codec for the given sample rate."""
    if sample_rate == 24000:
        model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").eval().to(device)
    else:
        raise ValueError("Unsupported sample rate. Please use 24000.")
    return model


snac_model = load_snac_model(SNAC_SAMPLE_RATE)
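
# snac_model.encode returns one tensor of code indices per codebook level;
# generate_snac_encoding below serializes only the coarsest level (index 0).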


def chunk_and_pad_audio(audio, chunk_size):
    """Zero-pad the waveform to a multiple of chunk_size, then split it into
    equal-length chunks along the time axis."""
    length = audio.shape[-1]
    padded_length = ((length + chunk_size - 1) // chunk_size) * chunk_size
    padded_audio = F.pad(audio, (0, padded_length - length), mode="constant", value=0)
    # unfold yields shape (channels, n_chunks, chunk_size)
    batched_audio = padded_audio.unfold(-1, size=chunk_size, step=chunk_size)
    return batched_audio
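
# Worked example (arbitrary numbers): a mono clip of 50,000 samples with
# chunk_size 24,576 is padded to 73,728 samples and unfolded into 3 chunks,
# i.e. (1, 50000) -> (1, 3, 24576).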


def generate_snac_encoding(audio):
    """Encode one audio example with SNAC and return its codebook-0 tokens
    as a space-separated string."""
    waveform = torch.tensor(audio["array"]).float().to(device)
    if audio["sampling_rate"] != SNAC_SAMPLE_RATE:
        resampler = torchaudio.transforms.Resample(
            orig_freq=audio["sampling_rate"], new_freq=SNAC_SAMPLE_RATE
        )
        waveform = resampler(waveform)
    # Downmix multi-channel audio to mono and ensure a (1, samples) shape.
    if waveform.dim() == 2:
        waveform = waveform.mean(dim=0, keepdim=True)
    elif waveform.dim() == 1:
        waveform = waveform.unsqueeze(0)
    # SNAC needs inputs whose length is a multiple of hop_length times the
    # LCM of the coarsest VQ stride and the attention window, so round a
    # 1-second chunk up to the nearest such multiple.
    num_seconds = 1
    chunk_size_initial = num_seconds * SNAC_SAMPLE_RATE
    lcm = np.lcm.reduce([snac_model.vq_strides[0], snac_model.attn_window_size or 1])
    pad_to = snac_model.hop_length * lcm
    chunk_size = int(np.ceil(chunk_size_initial / pad_to) * pad_to)
    chunks = chunk_and_pad_audio(waveform, chunk_size)
    chunks = chunks.permute(1, 0, 2)  # (n_chunks, channels, chunk_size)
    codes_list = []
    with torch.no_grad():
        for chunk in chunks:
            codes = snac_model.encode(chunk.unsqueeze(0))
            codes_list.append([c.cpu() for c in codes])
    # Concatenate each codebook's codes across chunks, then flatten.
    codes_per_book = [torch.cat(codes, dim=0) for codes in zip(*codes_list)]
    flat_codes = [code.reshape(-1).tolist() for code in codes_per_book]
    # Only the coarsest codebook (index 0) is serialized.
    string_codes = " ".join(map(str, flat_codes[0]))
    return string_codes
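
# The returned string is space-separated integer token IDs; a consumer could
# recover them with, e.g., [int(t) for t in record["snac"].split()], where
# `record` is one parsed line of the output JSONL.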


def process_audio(item):
    """Encode one dataset row; return a metadata record, or None on failure."""
    start_time = time.time()
    try:
        snac_tokens = generate_snac_encoding(item["audio"])
        if not snac_tokens:
            raise ValueError("Generated SNAC tokens are empty")
    except Exception:
        return None
    processing_time = time.time() - start_time
    return {
        "path": item["path"],
        "sentence": item["sentence"],
        "age": item["age"],
        "gender": item["gender"],
        "accent": item["accent"],
        "locale": item["locale"],
        "snac": snac_tokens,
        "processing_time": processing_time,
        "audio_duration": len(item["audio"]["array"]) / item["audio"]["sampling_rate"],
    }


def save_to_jsonl(data, file_path):
    # Open in append mode so new records extend the language-specific JSONL file.
    with open(file_path, "a") as f:
        for item in data:
            json.dump(item, f)
            f.write("\n")
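

# Minimal reader counterpart (illustrative sketch; not called anywhere in this
# script):
def load_jsonl(file_path):
    with open(file_path) as f:
        return [json.loads(line) for line in f]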


def process_language(language):
    # Ensure the output directory exists.
    language_dir = os.path.join(OUTPUT_DIR, language)
    os.makedirs(language_dir, exist_ok=True)
    jsonl_path = os.path.join(language_dir, f"{language}_processed.jsonl")
    # Read existing lines so a re-run can skip records already on disk.
    existing_data = set()
    if os.path.exists(jsonl_path):
        with open(jsonl_path, "r") as f:
            existing_data = set(f.readlines())
    # Stream the Common Voice split for this language instead of downloading it.
    dataset = load_dataset(
        "mozilla-foundation/common_voice_16_1", language, split="train", streaming=True
    )
    # Decode (and resample) audio on the fly at the SNAC sample rate.
    dataset = dataset.cast_column("audio", Audio(sampling_rate=SNAC_SAMPLE_RATE))
    processed_data = []
    total_processed = 0
    report_counter = 0
    for item in tqdm(dataset, desc=f"Processing {language}"):
        result = process_audio(item)
        if result:
            json_line = json.dumps(result) + "\n"
            if json_line not in existing_data:
                processed_data.append(result)
                existing_data.add(json_line)
                total_processed += 1
                report_counter += 1
                if report_counter >= 1000:  # Report to wandb every 1000 new rows
                    wandb.log(
                        {
                            "language": language,
                            "average_processing_time": np.mean(
                                [row["processing_time"] for row in processed_data]
                            ),
                            "average_audio_duration": np.mean(
                                [row["audio_duration"] for row in processed_data]
                            ),
                            "average_snac_token_count": np.mean(
                                [len(row["snac"].split()) for row in processed_data]
                            ),
                        }
                    )
                    report_counter = 0  # Reset the counter
        # Flush to disk every BATCH_SIZE buffered items.
        if len(processed_data) >= BATCH_SIZE:
            save_to_jsonl(processed_data, jsonl_path)
            processed_data = []  # Clear the buffer after saving
    # Save any remaining processed data.
    if processed_data:
        save_to_jsonl(processed_data, jsonl_path)
    return total_processed
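
# Caveat: deduplication compares full serialized lines, and "processing_time"
# varies between runs, so a resumed run only skips rows whose JSON happens to
# be byte-identical to a previously written line.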


def main():
    # Initialize wandb
    wandb.init(project="common-voice-processing", job_type="data-processing")
    # Language batches to process: run one batch at a time, uncommenting the
    # next list for each run.
    languages = ['ckb', 'cnh', 'cs', 'cv', 'cy', 'da', 'de']
    # languages = ['dv', 'dyu', 'el', 'en', 'eo', 'es', 'et']
    # languages = ['eu', 'fa', 'fi', 'fr', 'fy-NL', 'ga-IE', 'gl']
    # languages = ['gn', 'ha', 'he', 'hi', 'hsb', 'hu', 'hy-AM']
    # languages = ['ia', 'id', 'ig', 'is', 'it', 'ja', 'ka']
    # languages = ['kab', 'kk', 'kmr', 'ko', 'ky', 'lg', 'lij']
    # languages = ['lo', 'lt', 'ltg', 'lv', 'mdf', 'mhr', 'mk']
    # languages = ['ml', 'mn', 'mr', 'mrj', 'mt', 'myv', 'nan-tw']
    # languages = ['ne-NP', 'nhi', 'nl', 'nn-NO', 'oc', 'or', 'os']
    # languages = ['pa-IN', 'pl', 'ps', 'pt', 'quy', 'rm-sursilv', 'rm-vallader']
    # languages = ['ro', 'ru', 'rw', 'sah', 'sat', 'sc', 'sk']
    # languages = ['skr', 'sl', 'sq', 'sr', 'sv-SE', 'sw', 'ta']
    # languages = ['te', 'th', 'ti', 'tig', 'tk', 'tok', 'tr']
    # languages = ['tt', 'tw', 'ug', 'uk', 'ur', 'uz', 'vi', 'vot', 'yi', 'yo', 'yue', 'zgh', 'zh-CN', 'zh-HK', 'zh-TW']
    total_processed_all_languages = 0
    # Process each language in the active batch.
    for language in languages:
        total_processed = process_language(language)
        total_processed_all_languages += total_processed
    print(
        f"\nCompleted processing all languages. "
        f"Total files processed across all languages: {total_processed_all_languages}"
    )
    wandb.finish()


if __name__ == "__main__":
    main()
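
# To run (sketch): `python final3.py`. Assumes `torch`, `torchaudio`, `datasets`,
# `snac`, `wandb`, `numpy`, and `tqdm` are installed, and that the environment is
# authenticated with Hugging Face, since the Common Voice datasets are gated.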