Spaces: Running on Zero · File size: 1,603 Bytes
import os

import torch

from InferenceInterfaces.ToucanTTSInterface import ToucanTTSInterface
from Modules.ControllabilityGAN.GAN import GanWrapper
from Utility.storage_config import MODELS_DIR


class ControllableInterface:

    def __init__(self, gpu_id="cpu", available_artificial_voices=1000):
        # Restrict CUDA visibility before any CUDA context is created.
        if gpu_id == "cpu":
            os.environ["CUDA_VISIBLE_DEVICES"] = ""
        else:
            os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
            os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu_id}"
        self.device = "cuda" if gpu_id != "cpu" else "cpu"
        self.model = ToucanTTSInterface(device=self.device, tts_model_path=None)
        # Condition the TTS on a reference recording to set the default voice.
        self.model.set_utterance_embedding("female_high_voice.wav")

    def read(self,
             prompt,
             loudness_in_db
             ):
        phones = self.model.text2phone.get_phone_string(prompt)
        if len(phones) > 1800:
            # Input exceeds the supported length: replace it with a short notice
            # and re-phonemize, so the notice is what actually gets synthesized.
            prompt = "Your input was too long. Please try either a shorter text or split it into several parts."
            phones = self.model.text2phone.get_phone_string(prompt)
        print(prompt + "\n\n")
        wav, sr = self.model(phones,
                             input_is_phones=True,
                             duration_scaling_factor=1.0,
                             pitch_variance_scale=1.0,
                             energy_variance_scale=1.0,
                             pause_duration_scaling_factor=1.0,
                             return_plot_as_filepath=False,
                             prosody_creativity=0.2,
                             loudness_in_db=loudness_in_db)
        return sr, wav
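

# --------------------------------------------------------------------------
# Minimal usage sketch, not part of the original file. It assumes the ToucanTTS
# checkpoints and the reference file "female_high_voice.wav" are available
# locally, that the soundfile package is installed for writing WAV files, and
# that "demo.wav" and the loudness value are arbitrary example choices.
# --------------------------------------------------------------------------
if __name__ == "__main__":
    import soundfile as sf  # assumption: soundfile is available in the environment

    interface = ControllableInterface(gpu_id="cpu")
    sr, wav = interface.read("Hello world, this is a synthesis test.",
                             loudness_in_db=-24.0)
    # read() returns (sample_rate, waveform); convert to NumPy if it is a tensor.
    if torch.is_tensor(wav):
        wav = wav.detach().cpu().numpy()
    sf.write("demo.wav", wav, sr)
    print(f"Wrote {len(wav) / sr:.2f} seconds of audio to demo.wav")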