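"""Extract soft HuBERT content units from a wav file and save them as a .npy array.

If the pretrained hubert-soft checkpoint is not already cached under /tmp, it is
downloaded from the bshall/hubert GitHub release before inference. Audio is
resampled to 16 kHz and encoded in 20-second chunks.
"""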
import sys
import os

# Make the repository root importable so that `from hubert import ...` resolves.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import argparse

import numpy as np
import torch
import librosa
import requests
from tqdm import tqdm

from hubert import hubert_model
def load_audio(file: str, sr: int = 16000):
    # Load and resample to 16 kHz, the rate expected by hubert-soft.
    x, _ = librosa.load(file, sr=sr)
    return x
def load_model(path, device):
    model = hubert_model.hubert_soft(path)
    model.eval()
    if device != "cpu":
        # Use half precision on GPU to cut memory use; keep fp32 on CPU.
        model.half()
    model.to(device)
    return model
def check_and_download_model():
    temp_dir = "/tmp"
    model_path = os.path.join(temp_dir, "hubert-soft-0d54a1f4.pt")
    if os.path.exists(model_path):
        return f"Model already exists: {model_path}"
    url = "https://github.com/bshall/hubert/releases/download/v0.1/hubert-soft-0d54a1f4.pt"
    try:
        response = requests.get(url, stream=True)
        response.raise_for_status()
        total_size = int(response.headers.get("content-length", 0))
        with open(model_path, "wb") as f, tqdm(
            desc=model_path,
            total=total_size,
            unit="iB",
            unit_scale=True,
            unit_divisor=1024,
        ) as pbar:
            for data in response.iter_content(chunk_size=1024):
                size = f.write(data)
                pbar.update(size)
        return f"Model download finished: {model_path}"
    except Exception as e:
        return f"An error occurred while downloading the model: {e}"
def pred_vec(model, wavPath, vecPath, device):
    audio = load_audio(wavPath)
    audln = audio.shape[0]
    vec_a = []
    idx_s = 0
    chunk = 20 * 16000  # process the waveform in 20-second windows at 16 kHz
    while idx_s + chunk < audln:
        feats = audio[idx_s:idx_s + chunk]
        feats = torch.from_numpy(feats).to(device)
        feats = feats[None, None, :]
        if device != "cpu":
            feats = feats.half()
        with torch.no_grad():
            vec = model.units(feats).squeeze().data.cpu().float().numpy()
            vec_a.extend(vec)
        idx_s = idx_s + chunk
    if idx_s < audln:
        # Encode the remaining tail that is shorter than one full window.
        feats = audio[idx_s:audln]
        feats = torch.from_numpy(feats).to(device)
        feats = feats[None, None, :]
        if device != "cpu":
            feats = feats.half()
        with torch.no_grad():
            vec = model.units(feats).squeeze().data.cpu().float().numpy()
            # vec.shape == [length, dim=256], hop=320
            vec_a.extend(vec)
    np.save(vecPath, vec_a, allow_pickle=False)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-w", "--wav", help="path to the input wav file", dest="wav", required=True)
    parser.add_argument("-v", "--vec", help="path for the output feature file (.npy)", dest="vec", required=True)
    args = parser.parse_args()
    print(args.wav)
    print(args.vec)
    wavPath = args.wav
    vecPath = args.vec
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Report the download / cache status instead of discarding it.
    print(check_and_download_model())
    hubert = load_model("/tmp/hubert-soft-0d54a1f4.pt", device)
    pred_vec(hubert, wavPath, vecPath, device)
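
# Example invocation (the script filename "extract_hubert.py" below is a placeholder,
# not something defined by this repository):
#   python extract_hubert.py -w input.wav -v output_units
# np.save appends ".npy" if it is missing, so the features end up in output_units.npy.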