import io
import math
import os
import time
from queue import Queue
from threading import Thread
from typing import Optional

import gradio as gr
import numpy as np
import spaces
import torch
from gradio_webrtc import WebRTC
from parler_tts import ParlerTTSForConditionalGeneration
from pydub import AudioSegment
from transformers import AutoFeatureExtractor, AutoTokenizer, set_seed
from transformers.generation.streamers import BaseStreamer
from twilio.rest import Client

account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
auth_token = os.environ.get("TWILIO_AUTH_TOKEN")

if account_sid and auth_token:
    client = Client(account_sid, auth_token)
    token = client.tokens.create()
    rtc_configuration = {
        "iceServers": token.ice_servers,
        "iceTransportPolicy": "relay",
    }
else:
    rtc_configuration = None
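
# Assumption: with `rtc_configuration=None`, the WebRTC component falls back to its
# built-in defaults, which usually work locally but may fail behind restrictive NATs,
# where a TURN relay (provided here via Twilio) is required.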

device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
torch_dtype = torch.float16 if device != "cpu" else torch.float32

repo_id = "parler-tts/parler_tts_mini_v0.1"
jenny_repo_id = "parler-tts/parler-tts-mini-jenny-30H"

model = ParlerTTSForConditionalGeneration.from_pretrained(
    repo_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True
).to(device)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
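
# Only the base checkpoint is loaded in this snippet. A sketch of loading the Jenny
# fine-tune referenced by `jenny_repo_id` (assumption: it loads with the same API):
# jenny_model = ParlerTTSForConditionalGeneration.from_pretrained(
#     jenny_repo_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True
# ).to(device)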

SAMPLE_RATE = feature_extractor.sampling_rate
SEED = 42

default_text = "Please surprise me and speak in whatever voice you enjoy."

examples = [
    [
        "Remember - this is only the first iteration of the model! To improve the prosody and naturalness of the speech further, we're scaling up the amount of training data by a factor of five times.",
        "A male speaker with a low-pitched voice delivering his words at a fast pace in a small, confined space with a very clear audio and an animated tone.",
        0.2,
    ],
    [
        "'This is the best time of my life, Bartley,' she said happily.",
        "A female speaker with a slightly low-pitched, quite monotone voice delivers her words at a slightly faster-than-average pace in a confined space with very clear audio.",
        0.2,
    ],
    [
        "Montrose also, after having experienced still more variety of good and bad fortune, threw down his arms, and retired out of the kingdom.",
        "A male speaker with a slightly high-pitched voice delivering his words at a slightly slow pace in a small, confined space with a touch of background noise and a quite monotone tone.",
        0.2,
    ],
    [
        "Montrose also, after having experienced still more variety of good and bad fortune, threw down his arms, and retired out of the kingdom.",
        "A male speaker with a low-pitched voice delivers his words at a fast pace and an animated tone, in a very spacious environment, accompanied by noticeable background noise.",
        0.2,
    ],
]

jenny_examples = [
    [
        "Remember, this is only the first iteration of the model! To improve the prosody and naturalness of the speech further, we're scaling up the amount of training data by a factor of five times.",
        "Jenny speaks at an average pace with a slightly animated delivery in a very confined sounding environment with clear audio quality.",
        0.2,
    ],
    [
        "'This is the best time of my life, Bartley,' she said happily.",
        "Jenny speaks in quite a monotone voice at a slightly faster-than-average pace in a confined space with very clear audio.",
        0.2,
    ],
    [
        "Montrose also, after having experienced still more variety of good and bad fortune, threw down his arms, and retired out of the kingdom.",
        "Jenny delivers her words at a slightly slow pace in a small, confined space with a touch of background noise and a quite monotone tone.",
        0.2,
    ],
    [
        "Montrose also, after having experienced still more variety of good and bad fortune, threw down his arms, and retired out of the kingdom.",
        "Jenny delivers her words at a fast pace and an animated tone, in a very spacious environment, accompanied by noticeable background noise.",
        0.2,
    ],
]
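
# Note: `jenny_examples` pairs with the Jenny fine-tune referenced by `jenny_repo_id`;
# in this snippet only `examples` is wired into the UI (see gr.Examples below).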


class ParlerTTSStreamer(BaseStreamer):
    def __init__(
        self,
        model: ParlerTTSForConditionalGeneration,
        device: Optional[str] = None,
        play_steps: Optional[int] = 10,
        stride: Optional[int] = None,
        timeout: Optional[float] = None,
    ):
        """
        Streamer that stores playback-ready audio in a queue, to be used by a downstream application as an iterator. This is
        useful for applications that benefit from accessing the generated audio in a non-blocking way (e.g. in an interactive
        Gradio demo).

        Parameters:
            model (`ParlerTTSForConditionalGeneration`):
                The Parler-TTS model used to generate the audio waveform.
            device (`str`, *optional*):
                The torch device on which to run the computation. If `None`, will default to the device of the model.
            play_steps (`int`, *optional*, defaults to 10):
                The number of generation steps with which to return the generated audio array. Using fewer steps will
                mean the first chunk is ready faster, but will require more codec decoding steps overall. This value
                should be tuned to your device and latency requirements.
            stride (`int`, *optional*):
                The window (stride) between adjacent audio samples. Using a stride between adjacent audio samples reduces
                the hard boundary between them, giving smoother playback. If `None`, will default to a value equivalent to
                `play_steps // 6` in the audio space.
            timeout (`float`, *optional*):
                The timeout for the audio queue. If `None`, the queue will block indefinitely. Useful to handle exceptions
                in `.generate()`, when it is called in a separate thread.
        """
        self.decoder = model.decoder
        self.audio_encoder = model.audio_encoder
        self.generation_config = model.generation_config
        self.device = device if device is not None else model.device

        # variables used in the streaming process
        self.play_steps = play_steps
        if stride is not None:
            self.stride = stride
        else:
            hop_length = math.floor(self.audio_encoder.config.sampling_rate / self.audio_encoder.config.frame_rate)
            self.stride = hop_length * (play_steps - self.decoder.num_codebooks) // 6
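            # `hop_length` is the number of audio samples produced per decoder step, so the
            # default stride holds back roughly one sixth of each chunk's samples as overlap,
            # smoothing the seam between consecutive chunks.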
        self.token_cache = None
        self.to_yield = 0

        # variables used in the thread process
        self.audio_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def apply_delay_pattern_mask(self, input_ids):
        # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to Parler)
        _, delay_pattern_mask = self.decoder.build_delay_pattern_mask(
            input_ids[:, :1],
            bos_token_id=self.generation_config.bos_token_id,
            pad_token_id=self.generation_config.decoder_start_token_id,
            max_length=input_ids.shape[-1],
        )
        # apply the pattern mask to the input ids
        input_ids = self.decoder.apply_delay_pattern_mask(input_ids, delay_pattern_mask)

        # revert the pattern delay mask by filtering the pad token id
        mask = (delay_pattern_mask != self.generation_config.bos_token_id) & (
            delay_pattern_mask != self.generation_config.pad_token_id
        )
        input_ids = input_ids[mask].reshape(1, self.decoder.num_codebooks, -1)

        # append the frame dimension back to the audio codes
        input_ids = input_ids[None, ...]

        # send the input_ids to the correct device
        input_ids = input_ids.to(self.audio_encoder.device)

        decode_sequentially = (
            self.generation_config.bos_token_id in input_ids
            or self.generation_config.pad_token_id in input_ids
            or self.generation_config.eos_token_id in input_ids
        )
        if not decode_sequentially:
            output_values = self.audio_encoder.decode(
                input_ids,
                audio_scales=[None],
            )
        else:
            # special tokens are still present: drop any frame that contains one before decoding
            sample = input_ids[:, 0]
            sample_mask = (sample >= self.audio_encoder.config.codebook_size).sum(dim=(0, 1)) == 0
            sample = sample[:, :, sample_mask]
            output_values = self.audio_encoder.decode(sample[None, ...], [None])

        audio_values = output_values.audio_values[0, 0]
        return audio_values.cpu().float().numpy()

    def put(self, value):
        batch_size = value.shape[0] // self.decoder.num_codebooks
        if batch_size > 1:
            raise ValueError("ParlerTTSStreamer only supports batch size 1")

        if self.token_cache is None:
            self.token_cache = value
        else:
            self.token_cache = torch.concatenate([self.token_cache, value[:, None]], dim=-1)

        if self.token_cache.shape[-1] % self.play_steps == 0:
            audio_values = self.apply_delay_pattern_mask(self.token_cache)
            # hold back the last `stride` samples so the next chunk can overlap smoothly
            self.on_finalized_audio(audio_values[self.to_yield : -self.stride])
            self.to_yield += len(audio_values) - self.to_yield - self.stride

    def end(self):
        """Flushes any remaining cache and appends the stop symbol."""
        if self.token_cache is not None:
            audio_values = self.apply_delay_pattern_mask(self.token_cache)
        else:
            audio_values = np.zeros(self.to_yield)

        self.on_finalized_audio(audio_values[self.to_yield :], stream_end=True)

    def on_finalized_audio(self, audio: np.ndarray, stream_end: bool = False):
        """Put the new audio in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.audio_queue.put(audio, timeout=self.timeout)
        if stream_end:
            self.audio_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.audio_queue.get(timeout=self.timeout)
        if not isinstance(value, np.ndarray) and value == self.stop_signal:
            raise StopIteration()
        else:
            return value
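
# Minimal usage sketch of the streamer (illustrative; `generate_base` below does exactly this):
#     streamer = ParlerTTSStreamer(model, device=device, play_steps=int(frame_rate * 2.0))
#     Thread(target=model.generate, kwargs=dict(input_ids=..., streamer=streamer)).start()
#     for audio_chunk in streamer:  # blocks until each float32 chunk is ready
#         ...  # hand the chunk to an audio sink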

sampling_rate = model.audio_encoder.config.sampling_rate
frame_rate = model.audio_encoder.config.frame_rate


def numpy_to_mp3(audio_array, sampling_rate):
    # Normalize audio_array if it's floating-point
    if np.issubdtype(audio_array.dtype, np.floating):
        max_val = np.max(np.abs(audio_array))
        if max_val > 0:  # guard against division by zero on silent chunks
            audio_array = audio_array / max_val
        audio_array = (audio_array * 32767).astype(np.int16)  # scale to 16-bit range

    # Create an audio segment from the numpy array
    audio_segment = AudioSegment(
        audio_array.tobytes(),
        frame_rate=sampling_rate,
        sample_width=audio_array.dtype.itemsize,
        channels=1,
    )

    # Export the audio segment to MP3 bytes - use a high bitrate to maximise quality
    mp3_io = io.BytesIO()
    audio_segment.export(mp3_io, format="mp3", bitrate="320k")

    # Get the MP3 bytes
    mp3_bytes = mp3_io.getvalue()
    mp3_io.close()

    gr.Info(f"Sample of length {round(audio_array.shape[0] / sampling_rate, 2)} seconds ready")
    return mp3_bytes
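
# Note: `numpy_to_mp3` is not used by the WebRTC streaming path below, which yields raw
# (sampling_rate, ndarray) tuples; it is a convenience for exporting a finished sample
# as MP3 bytes, e.g. `numpy_to_mp3(audio_chunk, sampling_rate)`.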


@spaces.GPU  # assumption: request a ZeroGPU device per call; the otherwise-unused `spaces` import suggests this decorator was intended
def generate_base(text, description, play_steps_in_s=2.0):
    play_steps = int(frame_rate * play_steps_in_s)
    streamer = ParlerTTSStreamer(model, device=device, play_steps=play_steps)

    inputs = tokenizer(description, return_tensors="pt").to(device)
    prompt = tokenizer(text, return_tensors="pt").to(device)

    generation_kwargs = dict(
        input_ids=inputs.input_ids,
        prompt_input_ids=prompt.input_ids,
        streamer=streamer,
        do_sample=True,
        temperature=1.0,
        min_new_tokens=10,
    )

    set_seed(SEED)
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    start = time.time()
    total_length = 0
    previous = time.time()
    for i, new_audio in enumerate(streamer):
        if i == 0:
            gr.Info("First generation done")
        new_audio = new_audio.reshape(1, -1)
        segment_length = round(new_audio.shape[1] / sampling_rate, 2)
        total_length += segment_length
        now = time.time()
        print(
            f"Sample {i} done. {segment_length} seconds generated in {round(now - previous, 2)}. "
            f"So far, {round(total_length, 2)} seconds have been generated in {round(now - start, 2)} seconds"
        )
        previous = now
        yield (sampling_rate, new_audio)
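
# Each (sampling_rate, ndarray) tuple yielded above is relayed to the browser by the
# WebRTC component's `.stream()` handler registered below.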

css = """
#share-btn-container {
    display: flex;
    padding-left: 0.5rem !important;
    padding-right: 0.5rem !important;
    background-color: #000000;
    justify-content: center;
    align-items: center;
    border-radius: 9999px !important;
    width: 13rem;
    margin-top: 10px;
    margin-left: auto;
    flex: unset !important;
}
#share-btn {
    all: initial;
    color: #ffffff;
    font-weight: 600;
    cursor: pointer;
    font-family: 'IBM Plex Sans', sans-serif;
    margin-left: 0.5rem !important;
    padding-top: 0.25rem !important;
    padding-bottom: 0.25rem !important;
    right: 0;
}
#share-btn * {
    all: unset !important;
}
#share-btn-container div:nth-child(-n+2) {
    width: auto !important;
    min-height: 0px !important;
}
#share-btn-container .wrap {
    display: none !important;
}
"""

with gr.Blocks(css=css) as block:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 700px; margin: 0 auto;">
            <div
                style="
                    display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;
                "
            >
                <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
                    Parler-TTS 🗣️
                </h1>
            </div>
        </div>
        """
    )
    gr.HTML(
        """
        <p><a href="https://github.com/huggingface/parler-tts"> Parler-TTS</a> is a training and inference library for
        high-fidelity text-to-speech (TTS) models. Two models are demonstrated here: <a href="https://huggingface.co/parler-tts/parler_tts_mini_v0.1"> Parler-TTS Mini v0.1</a>,
        the first iteration of the model, trained on 10k hours of narrated audiobooks, and <a href="https://huggingface.co/ylacombe/parler-tts-mini-jenny-30H"> Parler-TTS Jenny</a>,
        a model fine-tuned on the <a href="https://huggingface.co/datasets/reach-vb/jenny_tts_dataset"> Jenny dataset</a>.
        Both models generate high-quality speech with features that can be controlled using a simple text prompt (e.g. gender, background noise, speaking rate, pitch and reverberation).</p>
        <p>Tips for ensuring good generation:
        <ul>
            <li>Include the term <b>"very clear audio"</b> to generate the highest quality audio, and "very noisy audio" for high levels of background noise</li>
            <li>When using the fine-tuned model, include the term <b>"Jenny"</b> to pick out her voice</li>
            <li>Punctuation can be used to control the prosody of the generations, e.g. use commas to add small breaks in speech</li>
            <li>The remaining speech features (gender, speaking rate, pitch and reverberation) can be controlled directly through the prompt</li>
        </ul>
        </p>
        """
    )
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(label="Input Text", lines=2, value=default_text, elem_id="input_text")
            description = gr.Textbox(label="Description", lines=2, value="", elem_id="input_description")
            play_seconds = gr.Slider(
                0.2,
                3.0,
                value=0.2,
                step=0.2,
                label="Streaming interval in seconds",
                info="Lower = shorter chunks, lower latency, more codec steps",
            )
            run_button = gr.Button("Generate Audio", variant="primary")
        with gr.Column():
            audio_out = WebRTC(
                label="Parler-TTS generation",
                modality="audio",
                mode="receive",
                rtc_configuration=rtc_configuration,
            )

    inputs = [input_text, description, play_seconds]
    outputs = [audio_out]
    gr.Examples(examples=examples, fn=generate_base, inputs=inputs, outputs=outputs, cache_examples=False)

    audio_out.stream(fn=generate_base, inputs=inputs, outputs=audio_out, trigger=run_button.click)
    gr.HTML(
        """
        <p>To improve the prosody and naturalness of the speech further, we're scaling up the amount of training data to 50k hours of speech.
        The v1 release of the model will be trained on this data, and will also ship with inference optimisations, such as flash attention
        and torch compile, that will improve latency by 2-4x. If you want to find out more about how this model was trained, or even fine-tune it yourself, check out the
        <a href="https://github.com/huggingface/parler-tts"> Parler-TTS</a> repository on GitHub. The Parler-TTS codebase and its
        associated checkpoints are licensed under <a href='https://github.com/huggingface/parler-tts?tab=Apache-2.0-1-ov-file#readme'> Apache 2.0</a>.</p>
        """
    )

block.queue()
block.launch(share=True, ssr_mode=False)