ylacombe (HF staff) committed on
Commit
cf93985
1 Parent(s): 311d249

- v02 (d95adfe36996e391bf9178e2e3189b24588ca230)

Files changed (1)
  1. app.py +52 -28
app.py CHANGED
@@ -12,9 +12,11 @@ from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed
 device = "cuda:0" if torch.cuda.is_available() else "cpu"


-repo_id = "parler-tts/parler_tts_mini_v0.1"
+repo_id = "parler-tts/parler-tts-mini-v1"
+repo_id_large = "ylacombe/parler-large-v1-og"

 model = ParlerTTSForConditionalGeneration.from_pretrained(repo_id).to(device)
+model_large = ParlerTTSForConditionalGeneration.from_pretrained(repo_id_large).to(device)
 tokenizer = AutoTokenizer.from_pretrained(repo_id)
 feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)

@@ -22,23 +24,33 @@ feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
 SAMPLE_RATE = feature_extractor.sampling_rate
 SEED = 42

-default_text = "Please surprise me and speak in whatever voice you enjoy."
+default_text = "All of the data, pre-processing, training code, and weights are released publicly under a permissive license, enabling the community to build on our work and develop their own powerful models."
+default_description = "Laura's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise."
 examples = [
     [
-        "Remember - this is only the first iteration of the model! To improve the prosody and naturalness of the speech further, we're scaling up the amount of training data by a factor of five times.",
-        "A male speaker with a low-pitched voice delivering his words at a fast pace in a small, confined space with a very clear audio and an animated tone."
+        "This version introduces speaker consistency across generations, characterized by their name. For example, Jon, Lea, Gary, Jenna, Mike and Laura.",
+        "Gary's voice is monotone yet slightly fast in delivery, with a very close recording that has no background noise.",
+        None,
+    ],
+    [
+        '''There's 34 speakers. To take advantage of this, simply adapt your text description to specify which speaker to use: "Mike speaks animatedly...".''',
+        "Gary speaks slightly animatedly and slightly slowly in delivery, with a very close recording that has no background noise.",
+        None
     ],
     [
         "'This is the best time of my life, Bartley,' she said happily.",
-        "A female speaker with a slightly low-pitched, quite monotone voice delivers her words at a slightly faster-than-average pace in a confined space with very clear audio.",
+        "A female speaker delivers a slightly expressive and animated speech with a moderate speed. The recording features a low-pitch voice and slight background noise, creating a close-sounding audio experience.",
+        None,
     ],
     [
         "Montrose also, after having experienced still more variety of good and bad fortune, threw down his arms, and retired out of the kingdom.",
-        "A male speaker with a slightly high-pitched voice delivering his words at a slightly slow pace in a small, confined space with a touch of background noise and a quite monotone tone.",
+        "A man voice speaks slightly slowly with very noisy background, carrying a low-pitch tone and displaying a touch of expressiveness and animation. The sound is very distant, adding an air of intrigue.",
+        None
     ],
     [
-        "Montrose also, after having experienced still more variety of good and bad fortune, threw down his arms, and retired out of the kingdom.",
-        "A male speaker with a low-pitched voice delivers his words at a fast pace and an animated tone, in a very spacious environment, accompanied by noticeable background noise.",
+        "Once upon a time, in the depth of winter, when the flakes of snow fell like feathers from the clouds, a queen sat sewing at her pal-ace window, which had a carved frame of black wood.",
+        "In a very poor recording quality, a female speaker delivers her slightly expressive and animated words with a fast pace. There's high level of background noise and a very distant-sounding reverberation. Her voice is slightly higher pitched than average.",
+        None,
     ],
 ]

@@ -64,14 +76,19 @@ def preprocess(text):
     return text

 @spaces.GPU
-def gen_tts(text, description):
-    inputs = tokenizer(description, return_tensors="pt").to(device)
+def gen_tts(text, description, use_large=False):
+    inputs = tokenizer(description.strip(), return_tensors="pt").to(device)
     prompt = tokenizer(preprocess(text), return_tensors="pt").to(device)

     set_seed(SEED)
-    generation = model.generate(
-        input_ids=inputs.input_ids, prompt_input_ids=prompt.input_ids, do_sample=True, temperature=1.0
-    )
+    if use_large:
+        generation = model_large.generate(
+            input_ids=inputs.input_ids, prompt_input_ids=prompt.input_ids, attention_mask=inputs.attention_mask, prompt_attention_mask=prompt.attention_mask, do_sample=True, temperature=1.0
+        )
+    else:
+        generation = model.generate(
+            input_ids=inputs.input_ids, prompt_input_ids=prompt.input_ids, attention_mask=inputs.attention_mask, prompt_attention_mask=prompt.attention_mask, do_sample=True, temperature=1.0
+        )
     audio_arr = generation.cpu().numpy().squeeze()

     return SAMPLE_RATE, audio_arr
@@ -132,41 +149,48 @@ with gr.Blocks(css=css) as block:
     gr.HTML(
         f"""
         <p><a href="https://github.com/huggingface/parler-tts"> Parler-TTS</a> is a training and inference library for
-        high-fidelity text-to-speech (TTS) models. The model demonstrated here, <a href="https://huggingface.co/parler-tts/parler_tts_mini_v0.1"> Parler-TTS Mini v0.1</a>,
-        is the first iteration model trained using 10k hours of narrated audiobooks. It generates high-quality speech
+        high-fidelity text-to-speech (TTS) models.</p>
+        <p>The models demonstrated here, Parler-TTS <a href="https://huggingface.co/parler-tts/parler-tts-mini-v1">Mini v1</a> and <a href="https://huggingface.co/parler-tts/parler-tts-large-v1">Large v1</a>,
+        are trained using 45k hours of narrated English audiobooks. It generates high-quality speech
         with features that can be controlled using a simple text prompt (e.g. gender, background noise, speaking rate, pitch and reverberation).</p>

-        <p>Tips for ensuring good generation:
-        <ul>
-            <li>Include the term "very clear audio" to generate the highest quality audio, and "very noisy audio" for high levels of background noise</li>
-            <li>Punctuation can be used to control the prosody of the generations, e.g. use commas to add small breaks in speech</li>
-            <li>The remaining speech features (gender, speaking rate, pitch and reverberation) can be controlled directly through the prompt</li>
-        </ul>
-        </p>
+        <p>By default, Parler-TTS generates 🎲 random voice. To ensure 🎯 <b> speaker consistency </b> across generations, these checkpoints were also trained on 34 speakers, characterized by name (e.g. Jon, Lea, Gary, Jenna, Mike, Laura).</p>
+
+        <p>To take advantage of this, simply adapt your text description to specify which speaker to use: `Jon's voice is monotone...`</p>
         """
     )
     with gr.Row():
         with gr.Column():
             input_text = gr.Textbox(label="Input Text", lines=2, value=default_text, elem_id="input_text")
-            description = gr.Textbox(label="Description", lines=2, value="", elem_id="input_description")
+            description = gr.Textbox(label="Description", lines=2, value=default_description, elem_id="input_description")
+            use_large = gr.Checkbox(value=False, label="Use Large checkpoint", info="Generate with Parler-TTS Large v1 instead of Mini v1 - Better but way slower.")
             run_button = gr.Button("Generate Audio", variant="primary")
         with gr.Column():
             audio_out = gr.Audio(label="Parler-TTS generation", type="numpy", elem_id="audio_out")

-    inputs = [input_text, description]
+    inputs = [input_text, description, use_large]
     outputs = [audio_out]
-    gr.Examples(examples=examples, fn=gen_tts, inputs=inputs, outputs=outputs, cache_examples=True)
     run_button.click(fn=gen_tts, inputs=inputs, outputs=outputs, queue=True)
+    gr.Examples(examples=examples, fn=gen_tts, inputs=inputs, outputs=outputs, cache_examples=True)
     gr.HTML(
         """
-        <p>To improve the prosody and naturalness of the speech further, we're scaling up the amount of training data to 50k hours of speech.
-        The v1 release of the model will be trained on this data, as well as inference optimisations, such as flash attention
-        and torch compile, that will improve the latency by 2-4x. If you want to find out more about how this model was trained and even fine-tune it yourself, check-out the
+        <p>Tips for ensuring good generation:
+        <ul>
+            <li>Include the term "very clear audio" to generate the highest quality audio, and "very noisy audio" for high levels of background noise</li>
+            <li>Punctuation can be used to control the prosody of the generations, e.g. use commas to add small breaks in speech</li>
+            <li>The remaining speech features (gender, speaking rate, pitch and reverberation) can be controlled directly through the prompt</li>
+        </ul>
+        </p>
+
+        <p>Parler-TTS can be much faster. We give some tips on how to generate much more quickly in this <a href="https://github.com/huggingface/parler-tts/blob/main/INFERENCE.md"> inference guide</a>. Think SDPA, torch.compile, batching and streaming!</p>
+
+        <p>If you want to find out more about how this model was trained and even fine-tune it yourself, check-out the
         <a href="https://github.com/huggingface/parler-tts"> Parler-TTS</a> repository on GitHub.</p>

         <p>The Parler-TTS codebase and its associated checkpoints are licensed under <a href='https://github.com/huggingface/parler-tts?tab=Apache-2.0-1-ov-file#readme'> Apache 2.0</a>.</p>
         """
     )

+
 block.queue()
 block.launch(share=True)
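
For reference, the generation path that this commit adds to gen_tts can be reproduced outside Gradio. The following is a minimal sketch based only on the code added above; the soundfile dependency, the output filename, and the hard-coded text/description values are illustrative additions, and the demo's preprocess() text normalization step is skipped here.

# Standalone sketch of the new Mini v1 generation path (mirrors gen_tts above).
# Assumes the parler_tts package is installed; soundfile is used only to save
# the result and is not part of app.py.
import torch
import soundfile as sf
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed

device = "cuda:0" if torch.cuda.is_available() else "cpu"
repo_id = "parler-tts/parler-tts-mini-v1"

model = ParlerTTSForConditionalGeneration.from_pretrained(repo_id).to(device)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)

# A named speaker in the description keeps the voice consistent across generations.
text = "All of the data, pre-processing, training code, and weights are released publicly under a permissive license."
description = "Laura's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise."

# Same call pattern as the updated gen_tts: the description provides input_ids,
# the transcript provides prompt_input_ids, and both attention masks are passed.
inputs = tokenizer(description.strip(), return_tensors="pt").to(device)
prompt = tokenizer(text, return_tensors="pt").to(device)

set_seed(42)
generation = model.generate(
    input_ids=inputs.input_ids,
    prompt_input_ids=prompt.input_ids,
    attention_mask=inputs.attention_mask,
    prompt_attention_mask=prompt.attention_mask,
    do_sample=True,
    temperature=1.0,
)
audio_arr = generation.cpu().numpy().squeeze()
sf.write("parler_tts_out.wav", audio_arr, feature_extractor.sampling_rate)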
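
Note also that each example row now ends with None for the new use_large argument, so the cached examples are rendered with the Mini v1 checkpoint (None is falsy). A hypothetical direct call of gen_tts mirroring the third example row:

# Hypothetical direct call of gen_tts (defined in app.py); the trailing None
# stands in for the "Use Large checkpoint" checkbox, so Mini v1 is used.
sample_rate, audio = gen_tts(
    "'This is the best time of my life, Bartley,' she said happily.",
    "A female speaker delivers a slightly expressive and animated speech with a moderate speed. The recording features a low-pitch voice and slight background noise, creating a close-sounding audio experience.",
    None,
)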