|
import asyncio |
|
import datetime |
|
import logging |
|
import os |
|
import time |
|
import traceback |
|
|
|
import edge_tts |
|
import gradio as gr |
|
import librosa |
|
import torch |
|
from fairseq import checkpoint_utils |
|
|
|
from config import Config |
|
from lib.infer_pack.models import ( |
|
SynthesizerTrnMs256NSFsid, |
|
SynthesizerTrnMs256NSFsid_nono, |
|
SynthesizerTrnMs768NSFsid, |
|
SynthesizerTrnMs768NSFsid_nono, |
|
) |
|
from rmvpe import RMVPE |
|
from vc_infer_pipeline import VC |
|
|
|
|
|
# Silence chatty third-party libraries; only warnings and above get through.
logging.getLogger("fairseq").setLevel(logging.WARNING)
logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)

# True when running inside a Hugging Face Space (SYSTEM=spaces); used below to
# enforce the 280-character text and 20-second audio limits.
limitation = os.getenv("SYSTEM") == "spaces"

# Shared device/precision configuration (device, is_half, ...) used by every
# model loaded in this module.
config = Config()
|
|
|
|
|
# Intermediate mp3 written by edge-tts and consumed by the RVC pipeline.
edge_output_filename = "edge_output.mp3"

# asyncio.run() replaces the deprecated asyncio.get_event_loop()
# .run_until_complete() pattern (DeprecationWarning since Python 3.10; in 3.12+
# get_event_loop() no longer implicitly creates a loop in all cases).
tts_voice_list = asyncio.run(edge_tts.list_voices())
# Voices actually exposed in the UI (subset of tts_voice_list).
tts_voices = ["mn-MN-BataaNeural", "mn-MN-YesuiNeural"]

# Every subdirectory of `weights/` is treated as one selectable RVC model.
model_root = "weights"
models = [d for d in os.listdir(model_root) if os.path.isdir(f"{model_root}/{d}")]
models.sort()
|
|
|
|
|
def model_data(model_name):
    """Load the RVC model stored under ``{model_root}/{model_name}``.

    Expects the directory to contain one ``.pth`` checkpoint and optionally one
    ``.index`` retrieval file.

    Returns:
        tuple: ``(tgt_sr, net_g, vc, version, index_file, if_f0)`` where
            tgt_sr (int): native sample rate of the model,
            net_g: synthesizer network in eval mode on ``config.device``,
            vc (VC): inference pipeline bound to ``tgt_sr``,
            version (str): checkpoint format, "v1" or "v2",
            index_file (str): path to the ``.index`` file, or "" if absent,
            if_f0 (int): 1 if the model uses f0 (pitch) conditioning.

    Raises:
        FileNotFoundError: if the model directory has no ``.pth`` file.
        ValueError: if the checkpoint declares an unknown version.
    """
    model_dir = f"{model_root}/{model_name}"
    pth_files = [f for f in os.listdir(model_dir) if f.endswith(".pth")]
    if not pth_files:
        raise FileNotFoundError(f"No .pth checkpoint found in {model_dir}")
    pth_path = f"{model_dir}/{pth_files[0]}"
    print(f"Loading {pth_path}")
    cpt = torch.load(pth_path, map_location="cpu")

    tgt_sr = cpt["config"][-1]
    # Recover the speaker count from the embedding weights; the stored config
    # can be stale relative to the actual weights.
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
    if_f0 = cpt.get("f0", 1)
    version = cpt.get("version", "v1")

    # Pick the synthesizer class matching checkpoint version and f0 mode.
    if version == "v1":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
    elif version == "v2":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
    else:
        raise ValueError(f"Unknown model version: {version!r}")

    # The posterior encoder is only used during training; drop it before
    # loading weights so inference memory stays low.
    del net_g.enc_q
    net_g.load_state_dict(cpt["weight"], strict=False)
    net_g.eval().to(config.device)
    net_g = net_g.half() if config.is_half else net_g.float()

    vc = VC(tgt_sr, config)

    index_files = [f for f in os.listdir(model_dir) if f.endswith(".index")]
    if index_files:
        index_file = f"{model_dir}/{index_files[0]}"
        print(f"Index file found: {index_file}")
    else:
        index_file = ""
        print("No index file found")

    return tgt_sr, net_g, vc, version, index_file, if_f0
|
|
|
|
|
def load_hubert():
    """Load the HuBERT content encoder from ``hubert_base.pt``.

    Returns the model in eval mode on ``config.device``, cast to half
    precision when ``config.is_half`` is set.
    """
    # `ensemble` (not `models`) to avoid shadowing the module-level model list.
    ensemble, _, _ = checkpoint_utils.load_model_ensemble_and_task(
        ["hubert_base.pt"],
        suffix="",
    )
    hubert = ensemble[0].to(config.device)
    hubert = hubert.half() if config.is_half else hubert.float()
    return hubert.eval()
|
|
|
|
|
def tts(
    model_name,
    speed,
    tts_text,
    tts_voice,
    f0_up_key,
    f0_method,
    index_rate,
    protect,
    filter_radius=3,
    resample_sr=0,
    rms_mix_rate=0.25,
):
    """Synthesize ``tts_text`` with edge-tts, then convert it with RVC.

    Args:
        model_name: RVC model directory name under ``model_root``.
        speed: edge-tts speaking rate offset in percent (-100..100).
        tts_text: text to synthesize.
        tts_voice: edge-tts voice short name (e.g. "mn-MN-BataaNeural").
        f0_up_key: pitch transpose in semitones (+12 = one octave up).
        f0_method: pitch extraction method, "pm" or "rmvpe".
        index_rate: retrieval-index blend rate in [0, 1].
        protect: consonant/breath protection in [0, 0.5].
        filter_radius: median-filter radius for the pitch track.
        resample_sr: output resample rate; 0 keeps the model's native rate.
        rms_mix_rate: volume-envelope mix rate.

    Returns:
        tuple: ``(info, edge_mp3_path_or_None, (sr, audio) or None)`` — on any
        failure the error text goes in ``info`` and the audio slots are None,
        matching the three Gradio outputs wired in ``but0.click``.
    """
    print("------------------")
    print(datetime.datetime.now())
    print(f"tts_text: {tts_text}")
    print(f"tts_voice: {tts_voice}, speed: {speed}")
    print(f"Model: {model_name}, F0: {f0_method}, Key: {f0_up_key}, "
          f"Index: {index_rate}, Protect: {protect}")
    try:
        # Enforce the Hugging Face Space text limit before doing any work.
        if limitation and len(tts_text) > 280:
            print("Error: Text too long")
            return (
                f"Text characters should be at most 280 in this huggingface space, but got {len(tts_text)} characters.",
                None,
                None,
            )

        tgt_sr, net_g, vc, version, index_file, if_f0 = model_data(model_name)

        # edge-tts wants a signed percent string, e.g. "+10%" or "-20%".
        speed_str = f"+{speed}%" if speed >= 0 else f"{speed}%"
        t0 = time.time()
        asyncio.run(
            edge_tts.Communicate(tts_text, tts_voice, rate=speed_str).save(
                edge_output_filename
            )
        )
        edge_time = time.time() - t0

        # RVC's content encoder expects 16 kHz mono input.
        audio, sr = librosa.load(edge_output_filename, sr=16000, mono=True)
        duration = len(audio) / sr
        print(f"Audio duration: {duration}s")
        if limitation and duration >= 20:
            print("Error: Audio too long")
            return (
                f"Audio should be less than 20 seconds in this huggingface space, but got {duration}s.",
                edge_output_filename,
                None,
            )

        f0_up_key = int(f0_up_key)
        if f0_method == "rmvpe":
            # Reuse the pitch model preloaded at module import time.
            vc.model_rmvpe = rmvpe_model

        times = [0, 0, 0]  # filled in-place by the pipeline: [npy, f0, infer]
        audio_opt = vc.pipeline(
            hubert_model,
            net_g,
            0,  # speaker id
            audio,
            edge_output_filename,
            times,
            f0_up_key,
            f0_method,
            index_file,
            index_rate,
            if_f0,
            filter_radius,
            tgt_sr,
            resample_sr,
            rms_mix_rate,
            version,
            protect,
            None,  # no external f0 file
        )
        if resample_sr >= 16000 and tgt_sr != resample_sr:
            tgt_sr = resample_sr

        info = (
            f"Success. Time: edge-tts: {edge_time}s, "
            f"npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s"
        )
        print(info)
        return info, edge_output_filename, (tgt_sr, audio_opt)
    except EOFError:
        # edge-tts produced an empty/truncated stream; usually a text/voice
        # language mismatch rather than a bug in this code.
        info = (
            "It seems that the edge-tts output is not valid. "
            "This may occur when the input text and the speaker do not match."
        )
        print(info)
        return info, None, None
    except Exception:
        # Surface the traceback in the UI instead of crashing the worker.
        info = traceback.format_exc()
        print(info)
        return info, None, None
|
|
|
|
|
# Preload the two models shared by every tts() call: the HuBERT content
# encoder and the RMVPE pitch extractor. Loading once at import time keeps
# per-request latency down.
print("Loading hubert model...")
hubert_model = load_hubert()
print("Hubert model loaded.")

print("Loading rmvpe model...")
rmvpe_model = RMVPE("rmvpe.pt", config.is_half, config.device)
print("rmvpe model loaded.")
|
|
|
# Introductory markdown for the demo page.
# NOTE(review): `initial_md` is defined but never referenced in the visible
# UI code below — presumably it was meant to be rendered with
# gr.Markdown(initial_md) inside the Blocks; confirm against the original app.
initial_md = """
# RVC text-to-speech demo
This is a text-to-speech demo of RVC moe models of [rvc_okiba](https://huggingface.co/litagin/rvc_okiba) using [edge-tts](https://github.com/rany2/edge-tts).
Input text ➡[(edge-tts)](https://github.com/rany2/edge-tts)➡ Speech mp3 file ➡[(RVC)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)➡ Final output
This runs on the 🤗 server's cpu, so it may be slow.
Although the models are trained on Japanese voices and intended for Japanese text, they can also be used with other languages with the corresponding edge-tts speaker (but possibly with a Japanese accent).
Input characters are limited to 280 characters, and the speech audio is limited to 20 seconds in this 🤗 space.
[Visit this GitHub repo](https://github.com/litagin02/rvc-tts-webui) for running locally with your models and GPU!
"""
|
|
|
# ---- Gradio UI --------------------------------------------------------------
app = gr.Blocks()
with app:
    with gr.Row():
        with gr.Column():
            # RVC model choice and semitone transpose.
            model_name = gr.Dropdown(
                label="Model (all models except man-_ are girl models)",
                choices=models,
                value=models[0],
            )
            f0_key_up = gr.Number(
                label="Tune (+12 = 1 octave up from edge-tts, the best value depends on the models and speakers)",
                value=0,
            )
        with gr.Column():
            # Pitch extraction method plus retrieval/protection strengths.
            f0_method = gr.Radio(
                label="Pitch extraction method (pm: very fast, low quality, rmvpe: a little slow, high quality)",
                choices=["pm", "rmvpe"],
                value="rmvpe",
                interactive=True,
            )
            index_rate = gr.Slider(
                minimum=0,
                maximum=1,
                label="Slang rate",
                value=0.75,
                interactive=True,
            )
            protect0 = gr.Slider(
                minimum=0,
                maximum=0.5,
                label="Protect",
                value=0.33,
                step=0.01,
                interactive=True,
            )
    with gr.Row():
        with gr.Column():
            # edge-tts speaker, speaking rate, and the input text.
            tts_voice = gr.Dropdown(
                label="Edge-tts speaker (format: language-Country-Name-Gender)",
                choices=tts_voices,
                allow_custom_value=False,
                value="mn-MN-BataaNeural",
            )
            speed = gr.Slider(
                minimum=-100,
                maximum=100,
                label="Speech speed (%)",
                value=0,
                step=10,
                interactive=True,
            )
            tts_text = gr.Textbox(label="Input Text", value="Текстыг оруулна уу.")
        with gr.Column():
            but0 = gr.Button("Convert", variant="primary")
            info_text = gr.Textbox(label="Output info")
        with gr.Column():
            # Intermediate edge-tts mp3 and the final converted audio.
            edge_tts_output = gr.Audio(label="Edge Voice", type="filepath")
            tts_output = gr.Audio(label="Result")
        # Wire the button to tts(); the input list order must match the
        # positional parameters of tts() exactly.
        but0.click(
            tts,
            [
                model_name,
                speed,
                tts_text,
                tts_voice,
                f0_key_up,
                f0_method,
                index_rate,
                protect0,
            ],
            [info_text, edge_tts_output, tts_output],
        )


# NOTE(review): `enable_api` is not a `launch()` parameter in recent Gradio
# releases (`show_api` is the modern equivalent) — confirm against the
# gradio version this project pins.
app.launch(enable_api=True, share=True)
|
|
|
|