Beehzod committed
Commit ad49b31 · verified · 1 Parent(s): f9a47e1

Update app.py

Files changed (1)
  1. app.py +129 -18
app.py CHANGED
@@ -1,30 +1,141 @@
 import gradio as gr
 import torch
-from transformers import SpeechT5ForTextToSpeech, SpeechT5Processor, SpeechT5HifiGan
 import soundfile as sf
+import spaces
+import os
+import numpy as np
+import re
+from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
+from speechbrain.pretrained import EncoderClassifier
+from datasets import load_dataset
 
-model = SpeechT5ForTextToSpeech.from_pretrained("Beehzod/speecht5_finetuned_uz_customData2")
-processor = SpeechT5Processor.from_pretrained("Beehzod/speecht5_finetuned_uz_customData2")
-vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
-speaker_embeddings = torch.zeros((1, 512))
-
-def text_to_speech(text):
-
-    inputs = processor(text=text, return_tensors="pt")
-    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
-    output_path = "output.wav"
-    sf.write(output_path, speech.numpy(), 16000)
-    return output_path
-
-interface = gr.Interface(
+def load_models_and_data():
+    model_name = "microsoft/speecht5_tts"
+    processor = SpeechT5Processor.from_pretrained(model_name)
+    model = SpeechT5ForTextToSpeech.from_pretrained("Beehzod/speecht5_finetuned_uz_customData2").to(device)
+    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
+
+    spk_model_name = "speechbrain/spkrec-xvect-voxceleb"
+    speaker_model = EncoderClassifier.from_hparams(
+        source=spk_model_name,
+        run_opts={"device": device},
+        savedir=os.path.join("/tmp", spk_model_name),
+    )
+
+    # Load a sample from the dataset for the default speaker embedding
+    dataset = load_dataset("Beehzod/UzTTS_data", split="train")
+    example = dataset[304]
+
+    return model, processor, vocoder, speaker_model, example
+
+model, processor, vocoder, speaker_model, default_example = load_models_and_data()
+
+def create_speaker_embedding(waveform):
+    with torch.no_grad():
+        speaker_embeddings = speaker_model.encode_batch(torch.tensor(waveform).unsqueeze(0).to(device))
+        speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings, dim=2)
+        speaker_embeddings = speaker_embeddings.squeeze()
+    return speaker_embeddings
+
+def prepare_default_embedding(example):
+    audio = example["audio"]
+    return create_speaker_embedding(audio["array"])
+
+default_embedding = prepare_default_embedding(default_example)
+
+replacements = [
+    ("а", "a"), ("б", "b"), ("в", "v"), ("г", "g"),
+    ("д", "d"), ("е", "e"), ("л", "l"), ("м", "m"),
+    ("о", "o"), ("р", "r"), ("с", "s"), ("т", "t"),
+    ("у", "u"), ("х", "x"), ("ю", "yu"), ("ё", "yo")
+]
+
+
+number_words = {
+    0: "nol", 1: "bir", 2: "ikki", 3: "uch", 4: "to'rt", 5: "besh", 6: "olti", 7: "yetti", 8: "sakkiz", 9: "to'qqiz",
+    10: "o'n", 11: "o'n bir", 12: "o'n ikki", 13: "o'n uch", 14: "o'n to'rt", 15: "o'n besh", 16: "o'n olti", 17: "o'n yetti",
+    18: "o'n sakkiz", 19: "o'n to'qqiz", 20: "yigirma", 30: "o'ttiz", 40: "qirq", 50: "ellik", 60: "oltmish", 70: "yetmish",
+    80: "sakson", 90: "to'qson", 100: "yuz", 1000: "ming", 1000000: "million"
+}
+def number_to_words(number):
+    if number < 20:
+        return number_words[number]
+    elif number < 100:
+        tens, unit = divmod(number, 10)
+        return number_words[tens * 10] + (" " + number_words[unit] if unit else "")
+    elif number < 1000:
+        hundreds, remainder = divmod(number, 100)
+        return (number_words[hundreds] + " yuz" if hundreds > 1 else "yuz") + (" " + number_to_words(remainder) if remainder else "")
+    elif number < 1000000:
+        thousands, remainder = divmod(number, 1000)
+        return (number_to_words(thousands) + " ming" if thousands > 1 else "ming") + (" " + number_to_words(remainder) if remainder else "")
+    elif number < 1000000000:
+        millions, remainder = divmod(number, 1000000)
+        return number_to_words(millions) + " million" + (" " + number_to_words(remainder) if remainder else "")
+    elif number < 1000000000000:
+        billions, remainder = divmod(number, 1000000000)
+        return number_to_words(billions) + " milliard" + (" " + number_to_words(remainder) if remainder else "")
+    else:
+        return str(number)
+
+
+def replace_numbers_with_words(text):
+    def replace(match):
+        number = int(match.group())
+        return number_to_words(number)
+
+    # Find standalone numbers and replace them with their word forms
+    result = re.sub(r'\b\d+\b', replace, text)
+
+    return result
+
+def normalize_text(text):
+    # Convert to lowercase
+    text = text.lower()
+
+    # Replace numbers with words
+    text = replace_numbers_with_words(text)
+
+    # Apply character replacements
+    for old, new in replacements:
+        text = text.replace(old, new)
+
+    # Remove punctuation
+    text = re.sub(r'[^\w\s]', '', text)
+
+    return text
+
+@spaces.GPU(duration=60)
+def text_to_speech(text, audio_file=None):
+    # Normalize the input text
+    normalized_text = normalize_text(text)
+
+    # Prepare the input for the model
+    inputs = processor(text=normalized_text, return_tensors="pt").to(device)
+
+    # Use the default speaker embedding
+    speaker_embeddings = default_embedding
+
+    # Generate speech
+    with torch.no_grad():
+        speech = model.generate_speech(inputs["input_ids"], speaker_embeddings.unsqueeze(0), vocoder=vocoder)
+
+    speech_np = speech.cpu().numpy()
+
+    return (16000, speech_np)
+
+iface = gr.Interface(
     fn=text_to_speech,
-    inputs="text",
-    outputs="audio",
-    title="Uzbek Text-to-Speech Generator",
-    description="Enter Uzbek text and generate speech using the finetuned SpeechT5 model."
+    inputs=[
+        gr.Textbox(label="Enter Uzbek text to convert to speech")
+    ],
+    outputs=[
+        gr.Audio(label="Generated Speech", type="numpy")
+    ],
+    title="Uzbek SpeechT5 Text-to-Speech Demo",
+    description="Enter Uzbek text, and listen to the generated speech."
 )
 
-interface.launch()
+iface.launch(share=True)
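
The text front end added in this commit (number expansion plus Cyrillic-to-Latin transliteration) is pure Python, so it can be sanity-checked without downloading any of the models. A minimal sketch, assuming the updated app.py above is on the import path; note that importing app also runs its module-level model and dataset loading, so in practice this is only convenient inside the Space or an environment with the same dependencies installed:

# Hypothetical sanity check for the normalization front end defined in app.py above.
from app import number_to_words, normalize_text

assert number_to_words(42) == "qirq ikki"
assert number_to_words(2024) == "ikki ming yigirma to'rt"

# Numbers are spelled out, text is lowercased, and punctuation is stripped.
print(normalize_text("Salom, 25 dona olma!"))  # -> "salom yigirma besh dona olma"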
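
The pipeline can also be driven outside the Gradio UI, since text_to_speech now returns a (sample_rate, numpy_array) tuple instead of writing output.wav itself. A sketch under the assumption that the Hub models and the Beehzod/UzTTS_data dataset are reachable and the Space's dependencies are installed; the @spaces.GPU decorator is meant for ZeroGPU Spaces and should have no effect elsewhere, and the out.wav filename is purely illustrative:

# Hypothetical offline use of the updated pipeline.
import soundfile as sf
from app import text_to_speech

sample_rate, audio = text_to_speech("Bugun havo juda yaxshi")
sf.write("out.wav", audio, sample_rate)  # write the generated speech to disk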