hivecorp commited on
Commit
0ce84cc
·
verified ·
1 Parent(s): f7e1683

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +177 -91
app.py CHANGED
@@ -1,100 +1,186 @@
1
- import tempfile
2
- import edge_tts
3
  import gradio as gr
 
 
 
4
  import asyncio
5
- from concurrent.futures import ThreadPoolExecutor
6
-
7
- # Language and voice selection dictionary
8
- language_dict = {
9
- "Hindi": {
10
- "Madhur": "hi-IN-MadhurNeural",
11
- "Swara": "hi-IN-SwaraNeural"
12
- },
13
- "English": {
14
- "Jenny": "en-US-JennyNeural",
15
- "Guy": "en-US-GuyNeural",
16
- "Ana": "en-US-AnaNeural",
17
- "Aria": "en-US-AriaNeural",
18
- "Brian": "en-US-BrianNeural",
19
- "Christopher": "en-US-ChristopherNeural",
20
- "Eric": "en-US-EricNeural",
21
- "Michelle": "en-US-MichelleNeural",
22
- "Roger": "en-US-RogerNeural",
23
- "Natasha": "en-AU-NatashaNeural",
24
- "William": "en-AU-WilliamNeural",
25
- "Clara": "en-CA-ClaraNeural",
26
- "Liam": "en-CA-LiamNeural",
27
- "Libby": "en-GB-LibbyNeural",
28
- "Maisie": "en-GB-MaisieNeural",
29
- "Ryan": "en-GB-RyanNeural",
30
- "Sonia": "en-GB-SoniaNeural",
31
- "Thomas": "en-GB-ThomasNeural",
32
- "Sam": "en-HK-SamNeural",
33
- "Yan": "en-HK-YanNeural",
34
- "Connor": "en-IE-ConnorNeural",
35
- "Emily": "en-IE-EmilyNeural",
36
- "Neerja": "en-IN-NeerjaNeural",
37
- "Prabhat": "en-IN-PrabhatNeural",
38
- "Asilia": "en-KE-AsiliaNeural",
39
- "Chilemba": "en-KE-ChilembaNeural",
40
- "Abeo": "en-NG-AbeoNeural",
41
- "Ezinne": "en-NG-EzinneNeural",
42
- "Mitchell": "en-NZ-MitchellNeural",
43
- "James": "en-PH-JamesNeural",
44
- "Rosa": "en-PH-RosaNeural",
45
- "Luna": "en-SG-LunaNeural",
46
- "Wayne": "en-SG-WayneNeural",
47
- "Elimu": "en-TZ-ElimuNeural",
48
- "Imani": "en-TZ-ImaniNeural",
49
- "Leah": "en-ZA-LeahNeural",
50
- "Luke": "en-ZA-LukeNeural"
51
- },
52
- # Add other languages...
53
- }
54
-
55
- # Function to chunk text into parts of max 5000 characters
56
- def chunk_text(text, max_length=5000):
57
- return [text[i:i + max_length] for i in range(0, len(text), max_length)]
58
-
59
- # Function to generate speech for each chunk using edge_tts
60
- async def generate_speech(text_chunk, language, voice):
61
- communicate = edge_tts.Communicate(text_chunk, voice=language_dict[language][voice])
62
- audio_data = await communicate.save() # This is an awaitable method
63
- return audio_data
64
-
65
- # Function to process text and generate speech
66
- async def process_text_to_speech(text, language, voice):
67
- chunks = chunk_text(text)
68
- results = []
69
- # Generate speech for each chunk asynchronously
70
- for chunk in chunks:
71
- audio_data = await generate_speech(chunk, language, voice)
72
- results.append(audio_data)
73
-
74
- # Combine all audio parts into a single file
75
- with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as output_file:
76
- output_filename = output_file.name
77
- with open(output_filename, "wb") as f:
78
- for result in results:
79
- f.write(result) # Write the audio data to file
80
- return output_filename
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
 
82
  # Gradio interface function
83
- async def gradio_interface(text, language, voice):
84
- audio_filename = await process_text_to_speech(text, language, voice)
85
- return audio_filename
 
 
 
86
 
87
- # Gradio UI setup
88
- iface = gr.Interface(
89
- fn=gradio_interface,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
  inputs=[
91
- gr.Textbox(label="Enter Text"),
92
- gr.Dropdown(choices=list(language_dict.keys()), label="Select Language"),
93
- gr.Dropdown(choices=["Madhur", "Swara", "Jenny", "Guy", "Ana", "Aria", "Brian"], label="Select Voice")
 
 
 
 
 
 
94
  ],
95
- outputs=gr.File(label="Download Audio File"),
96
- live=True # To enable real-time input processing
 
97
  )
98
 
99
- # Launch the Gradio interface
100
- iface.launch()
 
 
 
1
  import gradio as gr
2
+ from pydub import AudioSegment
3
+ import edge_tts
4
+ import os
5
  import asyncio
6
+ import uuid
7
+ import re
8
+
9
# Function to get the length of an audio file in seconds
def get_audio_length(audio_file):
    """Return the duration of *audio_file* in seconds as a float.

    Relies on pydub (and its ffmpeg backend) to decode the file.
    """
    return AudioSegment.from_file(audio_file).duration_seconds
13
+
14
# Function to format time for SRT
def format_time(seconds):
    """Convert a duration in seconds to an SRT timestamp "HH:MM:SS,mmm"."""
    # Milliseconds come from the fractional part (truncated, not rounded).
    millis = int((seconds % 1) * 1000)
    whole_seconds = int(seconds)
    minutes, secs = divmod(whole_seconds, 60)
    hrs, mins = divmod(minutes, 60)
    return f"{hrs:02}:{mins:02}:{secs:02},{millis:03}"
22
+
23
# Function to split text into segments by punctuation or limit to 7-8 words
def split_text_into_segments(text):
    """Break *text* into caption-sized segments.

    Splits on sentence-ending punctuation (., !, ?), keeping the punctuation
    attached to its sentence; any sentence longer than 8 words is further
    chopped into 8-word chunks. Trailing text with no closing punctuation is
    chunked by 8 words as well.
    """
    segments = []
    # re.split with a capture group alternates sentence-body / punctuation,
    # always ending with one final (possibly empty) trailing field.
    pieces = re.split(r'([.!?])', text)

    # Pair each sentence body with its punctuation mark.
    for body, punct in zip(pieces[0::2], pieces[1::2]):
        sentence = body.strip() + punct
        words = sentence.split()
        if len(words) > 8:
            for start in range(0, len(words), 8):
                segments.append(" ".join(words[start:start + 8]))
        else:
            segments.append(sentence.strip())

    # Handle any leftover text after the last punctuation mark.
    if len(pieces) % 2 == 1:
        leftover_words = pieces[-1].strip().split()
        for start in range(0, len(leftover_words), 8):
            segments.append(" ".join(leftover_words[start:start + 8]))

    return segments
44
+
45
# Function to generate SRT with accurate timing per batch
async def generate_accurate_srt(batch_text, batch_num, start_offset, pitch, rate, voice):
    """Synthesize one batch of text and build SRT entries timed against the
    real audio length.

    Args:
        batch_text: Text chunk to synthesize (the caller slices ~500 chars).
        batch_num: Zero-based batch index; used for the temp file name and to
            offset subtitle numbering by 100 per batch.
        start_offset: Absolute start time (seconds) of this batch within the
            combined timeline.
        pitch: edge-tts pitch adjustment string (e.g. "+2Hz").
        rate: edge-tts rate adjustment string (e.g. "-1%").
        voice: Full edge-tts voice identifier (e.g. "en-US-AndrewNeural").

    Returns:
        Tuple of (srt_content, audio_file_path, end_offset_seconds).
    """
    audio_file = f"batch_{batch_num}_audio.wav"

    # Generate the audio using edge-tts.
    # NOTE(review): edge-tts emits MP3 data regardless of the .wav name;
    # pydub/ffmpeg appears to cope by sniffing the content — confirm on the
    # deployment system.
    tts = edge_tts.Communicate(batch_text, voice, rate=rate, pitch=pitch)
    await tts.save(audio_file)

    # Measure the real decoded duration so subtitle timing matches the audio.
    actual_length = get_audio_length(audio_file)

    # Split the text into caption-sized segments.
    segments = split_text_into_segments(batch_text)
    if not segments:
        # Whitespace-only batch: previously this divided by zero. Emit no
        # subtitles and just advance the timeline past this batch's audio.
        return "", audio_file, start_offset + actual_length

    # Distribute the audio evenly across segments (approximation: assumes
    # each segment takes an equal share of the batch's duration).
    segment_duration = actual_length / len(segments)
    start_time = start_offset

    srt_content = ""
    for index, segment in enumerate(segments):
        # Clamp so no entry runs past the end of this batch's audio.
        end_time = min(start_time + segment_duration, start_offset + actual_length)

        # Subtitle numbers are offset by 100 per batch, which assumes fewer
        # than 100 segments per batch (true for ~500-char batches) but yields
        # non-contiguous numbering across batches — most players tolerate it.
        srt_content += f"{index + 1 + (batch_num * 100)}\n"
        srt_content += f"{format_time(start_time)} --> {format_time(end_time)}\n"
        srt_content += segment + "\n\n"

        start_time = end_time

    return srt_content, audio_file, start_time
76
+
77
# Batch processing function
async def batch_process_srt_and_audio(script_text, pitch, rate, voice, progress=gr.Progress()):
    """Split the script into 500-character batches, synthesize each batch with
    edge-tts, and stitch the results into one MP3 plus one validated SRT file.

    Args:
        script_text: Full script text to convert.
        pitch: edge-tts pitch adjustment string (e.g. "+2Hz").
        rate: edge-tts rate adjustment string (e.g. "-1%").
        voice: Full edge-tts voice identifier.
        progress: Gradio progress tracker; the call-time default instance is
            the documented Gradio idiom for injecting a progress bar.

    Returns:
        Tuple of (final_srt_path, final_audio_path) — uniquely named files
        written to the current working directory.
    """
    # Fixed-size character slices; each becomes one edge-tts request.
    batches = [script_text[i:i + 500] for i in range(0, len(script_text), 500)]
    all_srt_content = ""
    combined_audio = AudioSegment.empty()
    start_offset = 0.0  # running position (seconds) on the combined timeline

    for batch_num, batch_text in enumerate(batches):
        srt_content, audio_file, end_offset = await generate_accurate_srt(batch_text, batch_num, start_offset, pitch, rate, voice)
        all_srt_content += srt_content

        # Append this batch's audio to the combined track, then advance the
        # timeline to where the next batch's subtitles should start.
        batch_audio = AudioSegment.from_file(audio_file)
        combined_audio += batch_audio
        start_offset = end_offset

        # Per-batch temp file is no longer needed once appended.
        # NOTE(review): not removed if an exception occurs above — consider
        # wrapping in try/finally.
        os.remove(audio_file)
        progress((batch_num + 1) / len(batches))

    # Second pass: re-parse every timing line and clamp end times to the real
    # total duration, since per-segment durations are evenly-divided estimates.
    total_audio_length = combined_audio.duration_seconds
    validated_srt_content = ""
    for line in all_srt_content.strip().splitlines():
        if '-->' in line:
            start_str, end_str = line.split(' --> ')
            # "HH:MM:SS,mmm" -> seconds: weight each colon-separated field.
            start_time = sum(x * float(t) for x, t in zip([3600, 60, 1, 0.001], start_str.replace(',', ':').split(':')))
            end_time = sum(x * float(t) for x, t in zip([3600, 60, 1, 0.001], end_str.replace(',', ':').split(':')))
            if end_time > total_audio_length:
                end_time = total_audio_length
            line = f"{format_time(start_time)} --> {format_time(end_time)}"
        validated_srt_content += line + "\n"

    # Unique names avoid collisions between concurrent Gradio sessions.
    unique_id = uuid.uuid4()
    final_audio_path = f"final_audio_{unique_id}.mp3"
    final_srt_path = f"final_subtitles_{unique_id}.srt"

    combined_audio.export(final_audio_path, format="mp3", bitrate="320k")

    with open(final_srt_path, "w") as srt_file:
        srt_file.write(validated_srt_content)

    return final_srt_path, final_audio_path
117
 
118
# Gradio interface function
async def process_script(script_text, pitch, rate, voice):
    """Bridge the Gradio inputs to the batch TTS/SRT pipeline.

    Args:
        script_text: Raw script text from the textbox.
        pitch: Pitch offset in Hz from the slider (numeric).
        rate: Rate offset in percent from the slider (numeric).
        voice: Display name selected in the dropdown (a voice_options key).

    Returns:
        (srt_path, audio_path, audio_path) — SRT file, audio file, and the
        same audio path again for the playback component.
    """
    # edge-tts only accepts explicitly signed adjustment strings such as
    # "+5Hz" or "-10%". The previous unsigned formatting ("5Hz", "0%", "1%")
    # is rejected — which is why 0 pitch was mapped to "-1Hz" as a workaround.
    # "+0Hz" and "+0%" are valid neutral values, so format with a forced sign.
    pitch_str = f"{int(pitch):+d}Hz"
    formatted_rate = f"{int(rate):+d}%"
    srt_path, audio_path = await batch_process_srt_and_audio(script_text, pitch_str, formatted_rate, voice_options[voice])
    return srt_path, audio_path, audio_path
125
 
126
# Gradio interface setup
# Maps dropdown display labels to edge-tts voice identifiers.
voice_options = {
    "Andrew Male": "en-US-AndrewNeural",
    "Jenny Female": "en-US-JennyNeural",
    "Guy Male": "en-US-GuyNeural",
    "Ana Female": "en-US-AnaNeural",
    "Aria Female": "en-US-AriaNeural",
    "Brian Male": "en-US-BrianNeural",
    "Christopher Male": "en-US-ChristopherNeural",
    "Eric Male": "en-US-EricNeural",
    # Fixed label: Michelle (en-US-MichelleNeural) is a female voice.
    "Michelle Female": "en-US-MichelleNeural",
    "Roger Male": "en-US-RogerNeural",
    "Natasha Female": "en-AU-NatashaNeural",
    "William Male": "en-AU-WilliamNeural",
    "Clara Female": "en-CA-ClaraNeural",
    # Fixed label: Liam (en-CA-LiamNeural) is a male voice; also removed the
    # stray trailing space that was in the old key.
    "Liam Male": "en-CA-LiamNeural",
    "Libby Female": "en-GB-LibbyNeural",
    "Maisie": "en-GB-MaisieNeural",
    "Ryan": "en-GB-RyanNeural",
    "Sonia": "en-GB-SoniaNeural",
    "Thomas": "en-GB-ThomasNeural",
    "Sam": "en-HK-SamNeural",
    "Yan": "en-HK-YanNeural",
    "Connor": "en-IE-ConnorNeural",
    "Emily": "en-IE-EmilyNeural",
    "Neerja": "en-IN-NeerjaNeural",
    "Prabhat": "en-IN-PrabhatNeural",
    "Asilia": "en-KE-AsiliaNeural",
    "Chilemba": "en-KE-ChilembaNeural",
    "Abeo": "en-NG-AbeoNeural",
    "Ezinne": "en-NG-EzinneNeural",
    "Mitchell": "en-NZ-MitchellNeural",
    "James": "en-PH-JamesNeural",
    "Rosa": "en-PH-RosaNeural",
    "Luna": "en-SG-LunaNeural",
    "Wayne": "en-SG-WayneNeural",
    "Elimu": "en-TZ-ElimuNeural",
    "Imani": "en-TZ-ImaniNeural",
    "Leah": "en-ZA-LeahNeural",
    "Luke": "en-ZA-LukeNeural"
}  # All voice options

app = gr.Interface(
    fn=process_script,
    inputs=[
        gr.Textbox(label="Enter Script Text", lines=10),
        gr.Slider(label="Pitch Adjustment (Hz)", minimum=-20, maximum=20, value=0, step=1),
        gr.Slider(label="Rate Adjustment (%)", minimum=-50, maximum=50, value=-1, step=1),
        gr.Dropdown(label="Select Voice", choices=list(voice_options.keys()), value="Andrew Male"),
    ],
    outputs=[
        gr.File(label="Download SRT File"),
        gr.File(label="Download Audio File"),
        gr.Audio(label="Audio Playback")
    ],
    title="HIVEcorp Text-to-Speech with SRT Generation",
    description="Convert your script into audio and generate subtitles.",
    # NOTE(review): "compact" is a Gradio 3.x theme name — confirm the
    # installed Gradio version still accepts it.
    theme="compact",
)

app.launch()