import logging
import math
import os
import subprocess
from io import BytesIO

import librosa
import numpy as np
import torch
import torch.nn.functional as F
import torchaudio
from audio_separator.separator import Separator
from einops import rearrange
from funasr.download.download_from_hub import download_model
from funasr.models.emotion2vec.model import Emotion2vec
from transformers import Wav2Vec2FeatureExtractor

from memo.models.emotion_classifier import AudioEmotionClassifierModel
from memo.models.wav2vec import Wav2VecModel

logger = logging.getLogger(__name__)

def resample_audio(input_audio_file: str, output_audio_file: str, sample_rate: int = 16000):
p = subprocess.Popen(
[
"ffmpeg",
"-y",
"-v",
"error",
"-i",
input_audio_file,
"-ar",
str(sample_rate),
output_audio_file,
]
)
ret = p.wait()
assert ret == 0, f"Resample audio failed! Input: {input_audio_file}, Output: {output_audio_file}"
return output_audio_file
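

# Example usage of resample_audio (illustrative sketch only; "speech.wav" is an assumed input path):
#   resample_audio("speech.wav", "speech-16k.wav", sample_rate=16000)
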
@torch.no_grad()
def preprocess_audio(
wav_path: str,
fps: int,
wav2vec_model: str,
vocal_separator_model: str = None,
cache_dir: str = "",
device: str = "cuda",
sample_rate: int = 16000,
num_generated_frames_per_clip: int = -1,
):
"""
Preprocess the audio file and extract audio embeddings.
Args:
wav_path (str): Path to the input audio file.
fps (int): Frames per second for the audio processing.
wav2vec_model (str): Path to the pretrained Wav2Vec model.
vocal_separator_model (str, optional): Path to the vocal separator model. Defaults to None.
cache_dir (str, optional): Directory for cached files. Defaults to "".
device (str, optional): Device to use ('cuda' or 'cpu'). Defaults to "cuda".
sample_rate (int, optional): Sampling rate for audio processing. Defaults to 16000.
num_generated_frames_per_clip (int, optional): Number of generated frames per clip for padding. Defaults to -1.
Returns:
tuple: A tuple containing:
- audio_emb (torch.Tensor): The processed audio embeddings.
- audio_length (int): The length of the audio in frames.
"""
# Initialize Wav2Vec model
audio_encoder = Wav2VecModel.from_pretrained(wav2vec_model).to(device=device)
audio_encoder.feature_extractor._freeze_parameters()
# Initialize Wav2Vec feature extractor
wav2vec_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(wav2vec_model)
# Initialize vocal separator if provided
vocal_separator = None
if vocal_separator_model is not None:
os.makedirs(cache_dir, exist_ok=True)
vocal_separator = Separator(
output_dir=cache_dir,
output_single_stem="vocals",
model_file_dir=os.path.dirname(vocal_separator_model),
)
vocal_separator.load_model(os.path.basename(vocal_separator_model))
        # Other separator checkpoints supported by audio_separator (e.g. "UVR-MDX-NET-Inst_HQ_3.onnx") can be passed via vocal_separator_model.
assert vocal_separator.model_instance is not None, "Failed to load audio separation model."
# Perform vocal separation if applicable
if vocal_separator is not None:
outputs = vocal_separator.separate(wav_path)
assert len(outputs) > 0, "Audio separation failed."
vocal_audio_file = outputs[0]
vocal_audio_name, _ = os.path.splitext(vocal_audio_file)
vocal_audio_file = os.path.join(vocal_separator.output_dir, vocal_audio_file)
vocal_audio_file = resample_audio(
vocal_audio_file,
os.path.join(vocal_separator.output_dir, f"{vocal_audio_name}-16k.wav"),
sample_rate,
)
else:
vocal_audio_file = wav_path
# Load audio and extract Wav2Vec features
speech_array, sampling_rate = librosa.load(vocal_audio_file, sr=sample_rate)
audio_feature = np.squeeze(wav2vec_feature_extractor(speech_array, sampling_rate=sampling_rate).input_values)
audio_length = math.ceil(len(audio_feature) / sample_rate * fps)
audio_feature = torch.from_numpy(audio_feature).float().to(device=device)
# Pad audio features to match the required length
if num_generated_frames_per_clip > 0 and audio_length % num_generated_frames_per_clip != 0:
audio_feature = torch.nn.functional.pad(
audio_feature,
(
0,
(num_generated_frames_per_clip - audio_length % num_generated_frames_per_clip) * (sample_rate // fps),
),
"constant",
0.0,
)
audio_length += num_generated_frames_per_clip - audio_length % num_generated_frames_per_clip
audio_feature = audio_feature.unsqueeze(0)
# Extract audio embeddings
    # Gradients are already disabled by the @torch.no_grad() decorator on this function
    embeddings = audio_encoder(audio_feature, seq_len=audio_length, output_hidden_states=True)
assert len(embeddings) > 0, "Failed to extract audio embeddings."
audio_emb = torch.stack(embeddings.hidden_states[1:], dim=1).squeeze(0)
audio_emb = rearrange(audio_emb, "b s d -> s b d")
    # For each frame, stack its embedding with those of the two previous and two following frames (clamped at the clip boundaries)
audio_emb = audio_emb.cpu().detach()
concatenated_tensors = []
for i in range(audio_emb.shape[0]):
vectors_to_concat = [audio_emb[max(min(i + j, audio_emb.shape[0] - 1), 0)] for j in range(-2, 3)]
concatenated_tensors.append(torch.stack(vectors_to_concat, dim=0))
audio_emb = torch.stack(concatenated_tensors, dim=0)
if vocal_separator is not None:
del vocal_separator
del audio_encoder
return audio_emb, audio_length
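

# Example usage of preprocess_audio (illustrative sketch only; the file and checkpoint paths
# below are assumptions and must point to locally available audio / models):
#
#   audio_emb, audio_length = preprocess_audio(
#       wav_path="speech.wav",
#       fps=30,
#       wav2vec_model="checkpoints/wav2vec2",
#       vocal_separator_model="checkpoints/audio_separator/vocal_separator.onnx",
#       cache_dir="outputs/audio_cache",
#       num_generated_frames_per_clip=16,
#   )
#   # audio_emb has shape (audio_length, 5, num_layers, hidden_dim): each frame embedding is
#   # stacked together with its two preceding and two following neighbours.
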
@torch.no_grad()
def extract_audio_emotion_labels(
model: str,
wav_path: str,
emotion2vec_model: str,
audio_length: int,
sample_rate: int = 16000,
device: str = "cuda",
):
"""
Extract audio emotion labels from an audio file.
Args:
model (str): Path to the MEMO model.
wav_path (str): Path to the input audio file.
emotion2vec_model (str): Path to the Emotion2vec model.
audio_length (int): Target length for interpolated emotion labels.
sample_rate (int, optional): Sample rate of the input audio. Default is 16000.
device (str, optional): Device to use ('cuda' or 'cpu'). Default is "cuda".
Returns:
torch.Tensor: Processed emotion labels with shape matching the target audio length.
"""
# Load models
    logger.info("Downloading the emotion2vec model from ModelScope")
kwargs = download_model(model=emotion2vec_model)
kwargs["tokenizer"] = None
kwargs["input_size"] = None
kwargs["frontend"] = None
emotion_model = Emotion2vec(**kwargs, vocab_size=-1).to(device)
init_param = kwargs.get("init_param", None)
load_emotion2vec_model(
model=emotion_model,
path=init_param,
ignore_init_mismatch=kwargs.get("ignore_init_mismatch", True),
oss_bucket=kwargs.get("oss_bucket", None),
scope_map=kwargs.get("scope_map", []),
)
emotion_model.eval()
classifier = AudioEmotionClassifierModel.from_pretrained(
model,
subfolder="misc/audio_emotion_classifier",
use_safetensors=True,
).to(device=device)
classifier.eval()
# Load audio
wav, sr = torchaudio.load(wav_path)
if sr != sample_rate:
wav = torchaudio.functional.resample(wav, sr, sample_rate)
wav = wav.view(-1) if wav.dim() == 1 else wav[0].view(-1)
emotion_labels = torch.full_like(wav, -1, dtype=torch.int32)
def extract_emotion(x):
"""
Extract emotion for a given audio segment.
"""
x = x.to(device=device)
x = F.layer_norm(x, x.shape).view(1, -1)
feats = emotion_model.extract_features(x)
x = feats["x"].mean(dim=1) # average across frames
x = classifier(x)
x = torch.softmax(x, dim=-1)
return torch.argmax(x, dim=-1)
# Process start, middle, and end segments
start_label = extract_emotion(wav[: sample_rate * 2]).item()
emotion_labels[:sample_rate] = start_label
for i in range(sample_rate, len(wav) - sample_rate, sample_rate):
mid_wav = wav[i - sample_rate : i - sample_rate + sample_rate * 3]
mid_label = extract_emotion(mid_wav).item()
emotion_labels[i : i + sample_rate] = mid_label
end_label = extract_emotion(wav[-sample_rate * 2 :]).item()
emotion_labels[-sample_rate:] = end_label
# Interpolate to match the target audio length
emotion_labels = emotion_labels.unsqueeze(0).unsqueeze(0).float()
emotion_labels = F.interpolate(emotion_labels, size=audio_length, mode="nearest").squeeze(0).squeeze(0).int()
num_emotion_classes = classifier.num_emotion_classes
del emotion_model
del classifier
return emotion_labels, num_emotion_classes
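

# Example usage of extract_audio_emotion_labels (illustrative sketch only; the paths and the
# emotion2vec model id below are assumptions):
#
#   emotion_labels, num_emotion_classes = extract_audio_emotion_labels(
#       model="checkpoints/memo",
#       wav_path="speech.wav",
#       emotion2vec_model="iic/emotion2vec_plus_large",
#       audio_length=audio_length,  # as returned by preprocess_audio
#   )
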
def load_emotion2vec_model(
path: str,
model: torch.nn.Module,
ignore_init_mismatch: bool = True,
map_location: str = "cpu",
oss_bucket=None,
scope_map=[],
):
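    """
    Load emotion2vec weights from a checkpoint into `model`.

    Args:
        path (str): Checkpoint path (a local file, or an object key when `oss_bucket` is provided).
        model (torch.nn.Module): Target module whose state dict is updated in place.
        ignore_init_mismatch (bool, optional): Skip parameters whose shapes differ between the
            checkpoint and the model instead of failing. Defaults to True.
        map_location (str, optional): Device passed to `torch.load`. Defaults to "cpu".
        oss_bucket (optional): OSS bucket used to fetch the checkpoint remotely. Defaults to None.
        scope_map (list or str, optional): Flat list of alternating (src_prefix, dst_prefix) entries
            used to map checkpoint keys to model keys. A comma-separated string is also accepted.
    """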
obj = model
dst_state = obj.state_dict()
logger.debug(f"Emotion2vec checkpoint: {path}")
if oss_bucket is None:
src_state = torch.load(path, map_location=map_location)
else:
buffer = BytesIO(oss_bucket.get_object(path).read())
src_state = torch.load(buffer, map_location=map_location)
src_state = src_state["state_dict"] if "state_dict" in src_state else src_state
src_state = src_state["model_state_dict"] if "model_state_dict" in src_state else src_state
src_state = src_state["model"] if "model" in src_state else src_state
if isinstance(scope_map, str):
scope_map = scope_map.split(",")
    # Work on a copy so the caller's list (and the mutable default argument) is not modified in place
    scope_map = list(scope_map) + ["module.", "None"]
for k in dst_state.keys():
k_src = k
if scope_map is not None:
src_prefix = ""
dst_prefix = ""
for i in range(0, len(scope_map), 2):
src_prefix = scope_map[i] if scope_map[i].lower() != "none" else ""
dst_prefix = scope_map[i + 1] if scope_map[i + 1].lower() != "none" else ""
if dst_prefix == "" and (src_prefix + k) in src_state.keys():
k_src = src_prefix + k
if not k_src.startswith("module."):
logger.debug(f"init param, map: {k} from {k_src} in ckpt")
elif k.startswith(dst_prefix) and k.replace(dst_prefix, src_prefix, 1) in src_state.keys():
k_src = k.replace(dst_prefix, src_prefix, 1)
if not k_src.startswith("module."):
logger.debug(f"init param, map: {k} from {k_src} in ckpt")
if k_src in src_state.keys():
if ignore_init_mismatch and dst_state[k].shape != src_state[k_src].shape:
logger.debug(
f"ignore_init_mismatch:{ignore_init_mismatch}, dst: {k, dst_state[k].shape}, src: {k_src, src_state[k_src].shape}"
)
else:
dst_state[k] = src_state[k_src]
else:
logger.debug(f"Warning, miss key in ckpt: {k}, mapped: {k_src}")
obj.load_state_dict(dst_state, strict=True)