# CosyVoice-300M / css/utils.py
import os
import logging
logging.getLogger('matplotlib').setLevel(logging.WARNING)
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s')
import torch
import numpy as np
import random
import librosa
from cosyvoice.utils.file_utils import load_wav
from cosyvoice.cli.cosyvoice import CosyVoice

# Load the three CosyVoice-300M variants: the base model (zero-shot / cross-lingual
# voice cloning), the SFT model (built-in speakers) and the Instruct model (style control).
cosyvoice = CosyVoice('FunAudioLLM/CosyVoice-300M')
cosyvoice_sft = CosyVoice('FunAudioLLM/CosyVoice-300M-SFT')
cosyvoice_instruct = CosyVoice('FunAudioLLM/CosyVoice-300M-Instruct')

example_tts_text = [
    "Every step we take is part of our strategy; everything you see, including the conversation I am having with you at this moment, every action I take, every word I speak, has a profound meaning.",
    "That comedian is really talented; as soon as he opens his mouth, he makes the whole audience burst into laughter.",
    "The prank he played made everyone unable to help but laugh."
]

example_prompt_text = [
    "我是通义实验室语音团队全新推出的生成式语音大模型,提供舒适自然的语音合成能力。",
    "I am a newly launched generative speech large model by the Qwen Voice Team of the Tongyi Laboratory, offering comfortable and natural text-to-speech synthesis capabilities."
]

# Prompt audio is expected at 16 kHz; synthesized audio comes out at 22.05 kHz.
prompt_sr, target_sr = 16000, 22050
# One second of silence at the output sample rate, used as a fallback waveform.
default_data = np.zeros(target_sr)


def set_all_random_seed(seed):
    # Seed Python, NumPy and PyTorch (CPU and all CUDA devices) so sampling is reproducible.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


# Peak ceiling used by postprocess() to avoid clipping.
max_val = 0.8


def postprocess(speech, top_db=60, hop_length=220, win_length=440):
    # Trim leading/trailing silence from the (1, T) waveform.
    speech, _ = librosa.effects.trim(
        speech, top_db=top_db,
        frame_length=win_length,
        hop_length=hop_length
    )
    # Peak-normalize when the signal exceeds max_val, then append 0.2 s of silence.
    if speech.abs().max() > max_val:
        speech = speech / speech.abs().max() * max_val
    speech = torch.concat([speech, torch.zeros(1, int(target_sr * 0.2))], dim=1)
    return speech


def use_instruct(text):
    # Route to the Instruct model when the text contains CosyVoice control tokens
    # (fine-grained markers such as laughter/emphasis or an instruction prompt).
    for symbol in ['<endofprompt>', '<laughter>', '</laughter>', '<strong>', '</strong>', '[laughter]', '[breath]']:
        if symbol in text:
            return True
    return False
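

# Usage sketch, not part of the upstream file: seed the RNGs, synthesize the second
# example sentence with the SFT model's first built-in speaker and post-process the
# result. This assumes a CosyVoice release that provides list_avaliable_spks() and
# whose inference_sft(tts_text, spk_id) yields dicts containing a 'tts_speech' tensor;
# older releases return a single dict instead of a generator, so adapt the loop if needed.
if __name__ == '__main__':
    set_all_random_seed(1986)
    spk_id = cosyvoice_sft.list_avaliable_spks()[0]
    for out in cosyvoice_sft.inference_sft(example_tts_text[1], spk_id):
        audio = postprocess(out['tts_speech']).numpy().flatten()
        logging.info('synthesized %d samples at %d Hz', len(audio), target_sr)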