# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Liu Yue)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))
import argparse
import gradio as gr
import numpy as np
import torch
import torchaudio
import random
import librosa
import logging
logging.getLogger('matplotlib').setLevel(logging.WARNING)
from cosyvoice.cli.cosyvoice import CosyVoice
from cosyvoice.utils.file_utils import load_wav, speed_change
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s')
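
# Callback for the dice button: returns a Gradio update dict that refreshes
# the seed Number box with a new random value.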
def generate_seed():
    seed = random.randint(1, 100000000)
    return {
        "__type__": "update",
        "value": seed
    }
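
# Seed every RNG in use (Python, NumPy, PyTorch CPU and CUDA) so a given seed
# value reproduces the same synthesis result.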
def set_all_random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
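
# Peak level that prompt audio is normalised to before inference.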
max_val = 0.8
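
# Trim leading/trailing silence from the prompt speech, normalise its peak
# level, and append 0.2 s of silence at the end.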
def postprocess(speech, top_db=60, hop_length=220, win_length=440):
    speech, _ = librosa.effects.trim(
        speech, top_db=top_db,
        frame_length=win_length,
        hop_length=hop_length
    )
    if speech.abs().max() > max_val:
        speech = speech / speech.abs().max() * max_val
    speech = torch.concat([speech, torch.zeros(1, int(target_sr * 0.2))], dim=1)
    return speech
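
# Inference modes shown in the UI and the step-by-step hints for each:
# 预训练音色 = pretrained voice, 3s极速复刻 = 3-second zero-shot voice cloning,
# 跨语种复刻 = cross-lingual cloning, 自然语言控制 = natural-language instruct.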
inference_mode_list = ['预训练音色', '3s极速复刻', '跨语种复刻', '自然语言控制']
instruct_dict = {'预训练音色': '1. 选择预训练音色\n2. 点击生成音频按钮',
                 '3s极速复刻': '1. 选择prompt音频文件,或录入prompt音频,注意不超过30s,若同时提供,优先选择prompt音频文件\n2. 输入prompt文本\n3. 点击生成音频按钮',
                 '跨语种复刻': '1. 选择prompt音频文件,或录入prompt音频,注意不超过30s,若同时提供,优先选择prompt音频文件\n2. 点击生成音频按钮',
                 '自然语言控制': '1. 选择预训练音色\n2. 输入instruct文本\n3. 点击生成音频按钮'}

def change_instruction(mode_checkbox_group):
    return instruct_dict[mode_checkbox_group]
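
# Main click handler: validates the inputs for the selected mode, dispatches to
# the matching CosyVoice inference call, optionally applies the speed factor,
# and returns (sample_rate, waveform) for the gr.Audio output.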
def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text, seed, speed_factor):
    if prompt_wav_upload is not None:
        prompt_wav = prompt_wav_upload
    elif prompt_wav_record is not None:
        prompt_wav = prompt_wav_record
    else:
        prompt_wav = None
    # instruct mode requires the iic/CosyVoice-300M-Instruct model; it is not a cross_lingual mode
    if mode_checkbox_group in ['自然语言控制']:
        if cosyvoice.frontend.instruct is False:
            gr.Warning('您正在使用自然语言控制模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M-Instruct模型'.format(args.model_dir))
            return (target_sr, default_data)
        if instruct_text == '':
            gr.Warning('您正在使用自然语言控制模式, 请输入instruct文本')
            return (target_sr, default_data)
        if prompt_wav is not None or prompt_text != '':
            gr.Info('您正在使用自然语言控制模式, prompt音频/prompt文本会被忽略')
    # cross_lingual mode requires the iic/CosyVoice-300M model, and tts_text and prompt_text should be in different languages
    if mode_checkbox_group in ['跨语种复刻']:
        if cosyvoice.frontend.instruct is True:
            gr.Warning('您正在使用跨语种复刻模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M模型'.format(args.model_dir))
            return (target_sr, default_data)
        if instruct_text != '':
            gr.Info('您正在使用跨语种复刻模式, instruct文本会被忽略')
        if prompt_wav is None:
            gr.Warning('您正在使用跨语种复刻模式, 请提供prompt音频')
            return (target_sr, default_data)
        gr.Info('您正在使用跨语种复刻模式, 请确保合成文本和prompt文本为不同语言')
    # in zero_shot and cross_lingual modes, make sure prompt_text and prompt_wav meet the requirements below
    if mode_checkbox_group in ['3s极速复刻', '跨语种复刻']:
        if prompt_wav is None:
            gr.Warning('prompt音频为空,您是否忘记输入prompt音频?')
            return (target_sr, default_data)
        if torchaudio.info(prompt_wav).sample_rate < prompt_sr:
            gr.Warning('prompt音频采样率{}低于{}'.format(torchaudio.info(prompt_wav).sample_rate, prompt_sr))
            return (target_sr, default_data)
    # sft mode only uses sft_dropdown
    if mode_checkbox_group in ['预训练音色']:
        if instruct_text != '' or prompt_wav is not None or prompt_text != '':
            gr.Info('您正在使用预训练音色模式,prompt文本/prompt音频/instruct文本会被忽略!')
    # zero_shot mode only uses prompt_wav and prompt_text
    if mode_checkbox_group in ['3s极速复刻']:
        if prompt_text == '':
            gr.Warning('prompt文本为空,您是否忘记输入prompt文本?')
            return (target_sr, default_data)
        if instruct_text != '':
            gr.Info('您正在使用3s极速复刻模式,预训练音色/instruct文本会被忽略!')

    if mode_checkbox_group == '预训练音色':
        logging.info('get sft inference request')
        set_all_random_seed(seed)
        output = cosyvoice.inference_sft(tts_text, sft_dropdown)
    elif mode_checkbox_group == '3s极速复刻':
        logging.info('get zero_shot inference request')
        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
        set_all_random_seed(seed)
        output = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k)
    elif mode_checkbox_group == '跨语种复刻':
        logging.info('get cross_lingual inference request')
        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
        set_all_random_seed(seed)
        output = cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k)
    else:
        logging.info('get instruct inference request')
        set_all_random_seed(seed)
        output = cosyvoice.inference_instruct(tts_text, sft_dropdown, instruct_text)
    if speed_factor != 1.0:
        try:
            audio_data, sample_rate = speed_change(output["tts_speech"], target_sr, str(speed_factor))
            audio_data = audio_data.numpy().flatten()
        except Exception as e:
            # Fall back to the unmodified audio so audio_data is always defined.
            logging.warning('Failed to change speed of audio: %s', e)
            audio_data = output['tts_speech'].numpy().flatten()
    else:
        audio_data = output['tts_speech'].numpy().flatten()
    return (target_sr, audio_data)
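
# Build the Gradio UI, wire up the callbacks, and launch the server.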
def main():
    with gr.Blocks() as demo:
        gr.Markdown("### 代码库 [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) 预训练模型 [CosyVoice-300M](https://www.modelscope.cn/models/iic/CosyVoice-300M) [CosyVoice-300M-Instruct](https://www.modelscope.cn/models/iic/CosyVoice-300M-Instruct) [CosyVoice-300M-SFT](https://www.modelscope.cn/models/iic/CosyVoice-300M-SFT)")
        gr.Markdown("#### 请输入需要合成的文本,选择推理模式,并按照提示步骤进行操作")

        tts_text = gr.Textbox(label="输入合成文本", lines=1, value="我是通义实验室语音团队全新推出的生成式语音大模型,提供舒适自然的语音合成能力。")
        speed_factor = gr.Slider(minimum=0.25, maximum=4, step=0.05, label="语速调节", value=1.0, interactive=True)
        with gr.Row():
            mode_checkbox_group = gr.Radio(choices=inference_mode_list, label='选择推理模式', value=inference_mode_list[0])
            instruction_text = gr.Text(label="操作步骤", value=instruct_dict[inference_mode_list[0]], scale=0.5)
            sft_dropdown = gr.Dropdown(choices=sft_spk, label='选择预训练音色', value=sft_spk[0], scale=0.25)
            with gr.Column(scale=0.25):
                seed_button = gr.Button(value="\U0001F3B2")
                seed = gr.Number(value=0, label="随机推理种子")
        with gr.Row():
            prompt_wav_upload = gr.Audio(sources='upload', type='filepath', label='选择prompt音频文件,注意采样率不低于16khz')
            prompt_wav_record = gr.Audio(sources='microphone', type='filepath', label='录制prompt音频文件')
        prompt_text = gr.Textbox(label="输入prompt文本", lines=1, placeholder="请输入prompt文本,需与prompt音频内容一致,暂时不支持自动识别...", value='')
        instruct_text = gr.Textbox(label="输入instruct文本", lines=1, placeholder="请输入instruct文本.", value='')

        generate_button = gr.Button("生成音频")
        audio_output = gr.Audio(label="合成音频")

        seed_button.click(generate_seed, inputs=[], outputs=seed)
        generate_button.click(generate_audio,
                              inputs=[tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text, seed, speed_factor],
                              outputs=[audio_output])
        mode_checkbox_group.change(fn=change_instruction, inputs=[mode_checkbox_group], outputs=[instruction_text])
    demo.queue(max_size=4, default_concurrency_limit=2)
    demo.launch(server_name='0.0.0.0', server_port=args.port)
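
# Entry point: parse CLI args, load the CosyVoice model once, list its
# pretrained speakers, and launch the UI. Prompt audio is expected at 16 kHz
# (prompt_sr); synthesized speech is produced at 22.05 kHz (target_sr).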
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--port',
                        type=int,
                        default=8000)
    parser.add_argument('--model_dir',
                        type=str,
                        default='iic/CosyVoice-300M',
                        help='local path or modelscope repo id')
    args = parser.parse_args()
    cosyvoice = CosyVoice(args.model_dir)
    sft_spk = cosyvoice.list_avaliable_spks()
    prompt_sr, target_sr = 16000, 22050
    default_data = np.zeros(target_sr)
    main()
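
# Typical launch, assuming this script is saved as webui.py inside the CosyVoice
# repo and a model has been downloaded locally (paths are illustrative):
#   python3 webui.py --port 50000 --model_dir pretrained_models/CosyVoice-300M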