yuchen005 committed
Commit acaa03c
1 Parent(s): 2f517d1

Upload add_speech_feats_to_train_data.py

Files changed (1)
  1. add_speech_feats_to_train_data.py +42 -0
add_speech_feats_to_train_data.py ADDED
@@ -0,0 +1,42 @@
+ import torch
+ import whisper
+ from tqdm import tqdm
+
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ model = whisper.load_model('large-v2', device=DEVICE)
+ model.eval()
+
+ # existing training data: a list of per-utterance dicts, each carrying an utterance id under item['id']
+ data = torch.load('./train_chime4.pt')
+
+ data_with_speech = []
+ for item in tqdm(data):
+     with torch.no_grad():
+         ### TO FILL BY USERS:
+         # use the utterance id (item['id']) to retrieve the parallel audio paths:
+         # clean_audio_path, noisy_audio_path (one possible lookup is sketched below the diff)
+
+         ### extract clean audio features with the Whisper encoder
+         clean_audio = whisper.load_audio(clean_audio_path)
+         clean_audio = whisper.pad_or_trim(clean_audio)
+         clean_mel = whisper.log_mel_spectrogram(clean_audio).to(model.device)
+         clean_audio_features = model.encoder(clean_mel.unsqueeze(0))[0]
+
+         ### extract noisy audio features the same way
+         noisy_audio = whisper.load_audio(noisy_audio_path)
+         noisy_audio = whisper.pad_or_trim(noisy_audio)
+         noisy_mel = whisper.log_mel_spectrogram(noisy_audio).to(model.device)
+         noisy_audio_features = model.encoder(noisy_mel.unsqueeze(0))[0]
+
+         # store the noisy features as 'audio_features' and the clean features as
+         # 'clean_audio_features' alongside the original fields of the training example
+         item_with_speech = {**item, 'audio_features': noisy_audio_features, 'clean_audio_features': clean_audio_features}
+         data_with_speech.append(item_with_speech)
+
+ torch.save(data_with_speech, './train_chime4_with_speech.pt')
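The "TO FILL BY USERS" step is the only part of the script that depends on the local data layout. Below is a minimal sketch of one way to resolve the parallel audio paths from an utterance id; the CLEAN_DIR / NOISY_DIR locations, the '<utt_id>.wav' naming rule, and the get_audio_paths helper are illustrative assumptions, not part of the uploaded script, and should be adapted to wherever the parallel CHiME-4 audio actually lives.

import os

# Hypothetical layout for the parallel CHiME-4 audio -- adjust the directories
# and the file-naming rule to the local setup.
CLEAN_DIR = './chime4/audio/clean'
NOISY_DIR = './chime4/audio/noisy'

def get_audio_paths(utt_id):
    """Return (clean_audio_path, noisy_audio_path) for an utterance id,
    assuming each utterance is stored as '<utt_id>.wav' in both directories."""
    clean_audio_path = os.path.join(CLEAN_DIR, f'{utt_id}.wav')
    noisy_audio_path = os.path.join(NOISY_DIR, f'{utt_id}.wav')
    if not (os.path.isfile(clean_audio_path) and os.path.isfile(noisy_audio_path)):
        raise FileNotFoundError(f'missing parallel audio for utterance {utt_id}')
    return clean_audio_path, noisy_audio_path

# inside the loop above:
# clean_audio_path, noisy_audio_path = get_audio_paths(item['id'])

Depending on the size of the training set, it may also be worth moving the extracted features to the CPU (e.g. clean_audio_features = clean_audio_features.cpu()) before appending them, so that the collected tensors do not accumulate on the GPU and the saved .pt file can be loaded on machines without CUDA.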