import gradio as gr
import edge_tts
import os
import asyncio
import re
from datetime import timedelta
from pydub import AudioSegment  # Requires `pydub`
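# Assumed runtime dependencies (not pinned anywhere in this script): gradio,
# edge-tts, and pydub from PyPI. pydub also typically needs an ffmpeg binary
# on the system path to decode and encode MP3 files.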
# Split text into 500-word batches for large scripts
def split_text(text, max_words=500):
    words = text.split()
    return [' '.join(words[i:i + max_words]) for i in range(0, len(words), max_words)]
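# Illustrative example (hypothetical input):
#   split_text("a b c d e", max_words=2) -> ["a b", "c d", "e"]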
# Split a batch into SRT sections (e.g., 8-10 words per section)
def generate_srt_sections(text, words_per_segment=8):
    # Keep the whitespace separators so the original spacing and punctuation survive
    tokens = re.split(r'(\s+)', text)
    srt_sections = []
    section_tokens = []
    word_count = 0
    for token in tokens:
        section_tokens.append(token)
        if token and not token.isspace():
            word_count += 1
        # Close the section at the word limit or at sentence-ending punctuation
        if word_count >= words_per_segment or token.endswith(('.', '!', '?')):
            srt_sections.append(''.join(section_tokens).strip())
            section_tokens = []
            word_count = 0
    if section_tokens:
        srt_sections.append(''.join(section_tokens).strip())
    return srt_sections
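# Illustrative example (hypothetical input, default words_per_segment=8):
#   generate_srt_sections("Hello there. This is a longer sentence that keeps going on.")
#   -> roughly ["Hello there.", "This is a longer sentence that keeps going", "on."]
# Sections close early at sentence-ending punctuation, otherwise at the word limit.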
# Generate audio for a single SRT section and return its length
async def generate_audio_for_section(text, filename):
    communicate = edge_tts.Communicate(text, "en-US-GuyNeural")
    await communicate.save(filename)
    audio = AudioSegment.from_file(filename)
    return len(audio) / 1000  # Duration in seconds
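# Usage sketch (illustrative filename and phrase):
#   duration = asyncio.run(generate_audio_for_section("Hello there.", "clip.mp3"))
#   # duration is a float in seconds, e.g. roughly 1 second for this phrase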
# Format a time in seconds as an SRT timestamp (HH:MM:SS,mmm)
def format_srt_timestamp(seconds):
    millis = int(round(seconds * 1000))
    hours, millis = divmod(millis, 3_600_000)
    minutes, millis = divmod(millis, 60_000)
    secs, millis = divmod(millis, 1_000)
    return f"{hours:02d}:{minutes:02d}:{secs:02d},{millis:03d}"

# Create an accurate SRT for a batch by cross-checking each section's audio.
# start_index and start_time let consecutive batches continue the subtitle
# numbering and running time instead of restarting at zero.
def generate_accurate_srt(batch_text, start_index=1, start_time=0.0):
    sections = generate_srt_sections(batch_text)
    srt_content = ""
    index = start_index
    for section in sections:
        # Generate audio for the section and measure its actual length for precise timing
        audio_file = f"temp_audio_{index}.mp3"
        actual_length = asyncio.run(generate_audio_for_section(section, audio_file))
        os.remove(audio_file)  # The temporary clip is only needed for the measurement
        end_time = start_time + actual_length
        # Write the SRT entry for this section
        start_timestamp = format_srt_timestamp(start_time)
        end_timestamp = format_srt_timestamp(end_time)
        srt_content += f"{index}\n{start_timestamp} --> {end_timestamp}\n{section}\n\n"
        start_time = end_time
        index += 1
    return srt_content, index, start_time
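# Illustrative output: each entry in the returned SRT string looks like
#   1
#   00:00:00,000 --> 00:00:01,320
#   Hello there.
# with every start time picking up where the previous entry ended.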
# Batch processing with section-wise cross-checking
def batch_process_srt_and_audio(script):
    batches = split_text(script, max_words=500)
    final_srt_content = ""
    audio_files = []
    srt_index = 1
    time_offset = 0.0
    for batch_index, batch_text in enumerate(batches):
        # Generate precise SRT for each batch, cross-checking every section and
        # carrying the subtitle index and running time across batches
        srt_content, srt_index, time_offset = generate_accurate_srt(batch_text, srt_index, time_offset)
        final_srt_content += srt_content
        # Generate the full batch audio and store it
        batch_audio_file = f"batch_audio_{batch_index}.mp3"
        asyncio.run(generate_audio_for_section(batch_text, batch_audio_file))
        audio_files.append(batch_audio_file)
    # Save the final SRT file
    final_srt_path = "final_output.srt"
    with open(final_srt_path, "w", encoding="utf-8") as f:
        f.write(final_srt_content)
    # Combine all batch audio files into one track
    final_audio_path = "final_combined_audio.mp3"
    combine_audio_files(audio_files, final_audio_path)
    return final_srt_path, final_audio_path
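# Usage sketch (illustrative): calling this directly, outside Gradio,
#   srt_path, audio_path = batch_process_srt_and_audio("Hello there. This is a test.")
# writes both output files to the current working directory and returns their paths.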
# Combine audio files into one output file
def combine_audio_files(audio_files, output_file):
    combined = AudioSegment.empty()
    for file in audio_files:
        combined += AudioSegment.from_file(file)
    combined.export(output_file, format="mp3")
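# Note: clips are concatenated in list order, so the combined track follows the
# original script order as long as audio_files is built sequentially.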
# Gradio interface
app = gr.Interface(
    fn=batch_process_srt_and_audio,
    inputs=gr.Textbox(lines=10, label="Input Script"),
    outputs=[gr.File(label="Download SRT"), gr.File(label="Download Audio")],
    title="Accurate Batch SRT & Audio Generator with Cross-Check",
    description="Enter a script to generate synchronized SRT and audio files with section-wise accuracy."
)

if __name__ == "__main__":
    app.launch()