Spaces: Running on Zero
Commit 50a7cb9 • 1 Parent(s): fa080ce
streamline
app.py CHANGED
@@ -119,13 +119,6 @@ with gr.Blocks(css=css) as block:
             <li>The remaining speech features (gender, speaking rate, pitch and reverberation) can be controlled directly through the prompt</li>
         </ul>
         </p>
-
-        <p>To improve the prosody and naturalness of the speech further, we're scaling up the amount of training data to 50k hours of speech.
-        The v1 release of the model will be trained on this data, as well as inference optimisations, such as flash attention
-        and torch compile, that will improve the latency by 2-4x.</p>
-
-        <p>If you want to find out more about how this model was trained and even fine-tune it yourself, check-out the
-        <a href="https://github.com/huggingface/parler-tts"> Parler-TTS</a> repository on GitHub.</p>
         """
     )
     with gr.Row():
@@ -140,7 +133,17 @@ with gr.Blocks(css=css) as block:
     outputs = [audio_out]
     gr.Examples(examples=examples, fn=gen_tts, inputs=inputs, outputs=outputs, cache_examples=True)
     run_button.click(fn=gen_tts, inputs=inputs, outputs=outputs, queue=True)
-    gr.HTML(
+    gr.HTML(
+        """
+        <p>To improve the prosody and naturalness of the speech further, we're scaling up the amount of training data to 50k hours of speech.
+        The v1 release of the model will be trained on this data, as well as inference optimisations, such as flash attention
+        and torch compile, that will improve the latency by 2-4x.</p>
+
+        <p>If you want to find out more about how this model was trained and even fine-tune it yourself, check-out the
+        <a href="https://github.com/huggingface/parler-tts"> Parler-TTS</a> repository on GitHub.</p>
+
+        <p>The Parler-TTS codebase and its associated checkpoints are licensed under <a href='https://github.com/huggingface/parler-tts?tab=Apache-2.0-1-ov-file#readme'> Apache 2.0</a></p>.
+        """)

     block.queue()
     block.launch(share=True)
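Read as a whole, the second hunk simply relocates the roadmap and licence text into a gr.HTML footer rendered after the examples and the run button, instead of keeping it in the intro block. Below is a minimal, self-contained sketch of what the streamlined layout might look like after this commit; gen_tts is a silent stub, and the css, examples and component labels are placeholder assumptions standing in for the parts of app.py that this diff does not show.

```python
# Minimal sketch of the layout after this commit (labels and helpers are assumptions).
import gradio as gr
import numpy as np

css = ""  # placeholder; the Space defines its own CSS elsewhere in app.py
examples = [
    ["Hey, how are you doing today?",
     "A female speaker with a slightly low-pitched voice delivers her words quite expressively."],
]

def gen_tts(text, description):
    # stub: the real gen_tts runs Parler-TTS; here we return one second of silence
    return 16000, np.zeros(16000, dtype=np.float32)

with gr.Blocks(css=css) as block:
    # intro block: after this commit it only keeps the usage notes
    gr.HTML(
        """
        <ul>
            <li>The remaining speech features (gender, speaking rate, pitch and reverberation) can be controlled directly through the prompt</li>
        </ul>
        """
    )
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(label="Input Text")          # assumed label
            description = gr.Textbox(label="Description")        # assumed label
            run_button = gr.Button("Generate Audio")             # assumed label
        with gr.Column():
            audio_out = gr.Audio(label="Parler-TTS generation")  # assumed label

    inputs = [input_text, description]
    outputs = [audio_out]
    gr.Examples(examples=examples, fn=gen_tts, inputs=inputs, outputs=outputs, cache_examples=True)
    run_button.click(fn=gen_tts, inputs=inputs, outputs=outputs, queue=True)

    # footer added by this commit: roadmap and licence notes now live below the demo
    gr.HTML(
        """
        <p>To improve the prosody and naturalness of the speech further, we're scaling up the amount of training data to 50k hours of speech.</p>
        <p>The Parler-TTS codebase and its associated checkpoints are licensed under <a href='https://github.com/huggingface/parler-tts?tab=Apache-2.0-1-ov-file#readme'>Apache 2.0</a>.</p>
        """)

block.queue()
block.launch(share=True)
```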
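The relocated footer also mentions flash attention and torch compile as upcoming inference optimisations. As a hedged sketch only, here is one way those could be applied to a Parler-TTS checkpoint at inference time, assuming the parler_tts Python package, a CUDA device, and the parler-tts/parler_tts_mini_v0.1 checkpoint; whether a given release accepts attn_implementation="flash_attention_2" depends on the package and checkpoint version, so treat that flag as an assumption rather than a documented guarantee.

```python
# Hedged sketch: applying flash attention and torch.compile to Parler-TTS inference.
# Assumptions: parler_tts is installed, a CUDA GPU is available, and the checkpoint
# supports the flash_attention_2 implementation.
import torch
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer

device = "cuda:0"
repo_id = "parler-tts/parler_tts_mini_v0.1"  # assumed checkpoint name

model = ParlerTTSForConditionalGeneration.from_pretrained(
    repo_id,
    torch_dtype=torch.float16,                # half precision for faster GPU inference
    attn_implementation="flash_attention_2",  # assumption: requires flash-attn support
).to(device)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

# torch.compile trades a slow first call (compilation) for faster subsequent calls
model.forward = torch.compile(model.forward, mode="reduce-overhead")

description = "A female speaker with a slightly low-pitched voice delivers her words quite expressively."
prompt = "Hey, how are you doing today?"

# in Parler-TTS the description goes to input_ids and the transcript to prompt_input_ids
input_ids = tokenizer(description, return_tensors="pt").input_ids.to(device)
prompt_input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

audio = model.generate(input_ids=input_ids, prompt_input_ids=prompt_input_ids)
audio_arr = audio.cpu().float().numpy().squeeze()  # waveform; sampling rate comes from the model config
```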