import librosa

from model_clap import ClapSE
from model_meta_voice import MetaVoiceSE
from model_pyannote_embedding import PyannoteSE
from model_w2v_bert import W2VBertSE
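
# Smoke test: load one sample clip and print the shape of the speaker
# embedding returned by each backend. The backends other than CLAP are
# commented out below and can be enabled individually.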
def test():
    # librosa.load resamples to 22050 Hz mono by default; the sample rate
    # is forwarded so each backend knows what it received.
    wav, sr = librosa.load("sample.wav")

    print("CLAP")
    model = ClapSE()
    v = model.get_speaker_embedding(wav, sr)
    print(v.shape)
# print("MetaVoiceSE") | |
# model = MetaVoiceSE() | |
# v = model.get_speaker_embedding(wav, sr) | |
# print(v.shape) | |
# print("PyannoteSE") | |
# model = PyannoteSE() | |
# v = model.get_speaker_embedding(wav, sr) | |
# print(v.shape) | |
# print("W2VBertSE") | |
# model = W2VBertSE() | |
# v = model.get_speaker_embedding(wav, sr) | |
# print(v.shape) | |
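
# A minimal sketch of the same test as a loop, assuming each *SE class
# shares the get_speaker_embedding(wav, sr) interface used above:
def test_all():
    wav, sr = librosa.load("sample.wav")
    for cls in (ClapSE, MetaVoiceSE, PyannoteSE, W2VBertSE):
        print(cls.__name__)
        v = cls().get_speaker_embedding(wav, sr)
        print(v.shape)
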
if __name__ == '__main__':
    test()