import gc
import os
import gradio as gr
import whisperx
import numpy as np
import moviepy.editor as mp
from moviepy.audio.AudioClip import AudioArrayClip
from pytube import YouTube
import deepl
import torch
import pyrubberband as pyrb
import soundfile as sf
import librosa
from TTS.api import TTS

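# COQUI_TOS_AGREED=1 accepts the Coqui TTS model license non-interactively.
# HF_TOKEN must belong to an account with access to the gated pyannote models;
# DEEPL_TOKEN is a DeepL API key. Both must be set in the environment.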
os.environ["COQUI_TOS_AGREED"] = "1"
HF_TOKEN = os.environ["HF_TOKEN"]
DEEPL_TOKEN = os.environ["DEEPL_TOKEN"]

# Download video from YouTube
def download_youtube_video(url):
    yt = YouTube(url)
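    # A progressive stream bundles audio and video in one MP4, which the
    # audio-extraction step below relies on; picking the highest-resolution
    # one is a common pytube idiom (a reasonable choice, not the only one).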
    stream = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
    output_path = stream.download()
    return output_path


# Extract audio from video
def extract_audio(video_path):
  clip = mp.VideoFileClip(video_path)
  audio_path = os.path.splitext(video_path)[0] + ".wav"
  clip.audio.write_audiofile(audio_path)
  return audio_path


# Perform speech diarization
def speech_diarization(audio_path, hf_token):
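  # Assumption: fall back to CPU with int8 quantization when CUDA is
  # unavailable, since float16 inference requires a GPU.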
  device = "cuda"
  batch_size = 16
  compute_type = "float16"
  model = whisperx.load_model("large-v2", device, compute_type=compute_type)

  # 1. Transcribe audio
  audio = whisperx.load_audio(audio_path)
  result = model.transcribe(audio, batch_size=batch_size)

  # Free the transcription model before loading the next one; drop the
  # reference first so gc.collect() and empty_cache() can reclaim the memory
  del model; gc.collect(); torch.cuda.empty_cache()

  # 2. Align whisper output
  model_a, metadata = whisperx.load_align_model(language_code=result["language"], device=device)
  result = whisperx.align(result["segments"], model_a, metadata, audio, device, return_char_alignments=False)

  # Free the alignment model as well
  del model_a; gc.collect(); torch.cuda.empty_cache()

  # 3. Assign speaker labels
  diarize_model = whisperx.DiarizationPipeline(model_name='pyannote/speaker-diarization@2.1', use_auth_token=hf_token, device=device)

  # add min/max number of speakers if known
  diarize_segments = diarize_model(audio)
  # diarize_model(audio, min_speakers=min_speakers, max_speakers=max_speakers)

  result = whisperx.assign_word_speakers(diarize_segments, result)
  print(f'\n[Original transcript]:\n{result["segments"]}\n')

  return result["segments"]


# Create per-speaker voice clips for TTS voice cloning
def speaker_voice_clips(transcription, audio_path):
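  # Voice cloning needs a few seconds of clean reference audio per speaker,
  # so we stitch together the first few diarized segments of each speaker.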
  # Collect timecodes of up to three uninterrupted snippets per speaker
  snippets_timecodes = {}
  for segment in transcription:
    speaker = segment['speaker']

    if speaker not in snippets_timecodes:
      snippets_timecodes[speaker] = []

    if len(snippets_timecodes[speaker]) < 3:
      snippet = {
          'start': segment['start'],
          'end': segment['end']
      }
      snippets_timecodes[speaker].append(snippet)

  # Cut voice clips and stitch them together
  original_audio = mp.AudioFileClip(audio_path)
  audio_file_directory = os.path.dirname(audio_path)

  voice_clips = {}
  for speaker, speaker_snippets in snippets_timecodes.items():
    subclips = []
    for snippet in speaker_snippets:
      start, end = snippet['start'], snippet['end']
      subclip = original_audio.subclip(start, end)
      subclips.append(subclip)

    concatenated_clip = mp.concatenate_audioclips(subclips)

    output_filename = os.path.join(audio_file_directory, f"{speaker}_voice_clips.wav")
    concatenated_clip.write_audiofile(output_filename)
    voice_clips[speaker] = output_filename

  return voice_clips


# Perform text translation
def translate_transcript(transcript, target_language, deepl_token):
  translator = deepl.Translator(deepl_token)

  translated_transcript = []
  for segment in transcript:
    text_to_translate = segment['text']
    translated_text = translator.translate_text(text_to_translate, target_lang=target_language)

    translated_segment = {
        'start': segment['start'],
        'end': segment['end'],
        'text': translated_text.text,
        'speaker': segment['speaker']
    }

    translated_transcript.append(translated_segment)

  print(f'\n[Translated transcript]:\n{translated_transcript}\n')

  return translated_transcript


# Adjust voice pace
def adjust_voice_pace(sound_array, sample_rate, target_duration):
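  # tempo_change > 1.0 speeds the audio up; pyrubberband time-stretches
  # without shifting pitch, unlike naive resampling.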
  duration = len(sound_array) / sample_rate
  tempo_change = duration / target_duration
  sound_array_stretched = pyrb.time_stretch(sound_array, sample_rate, tempo_change)
  return sound_array_stretched


# Perform voice cloning
def voice_cloning_translation(translated_transcription, speakers_voice_clips, target_language, speaker_model, audio_path):
  device = "cuda"

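  # Fairseq VITS checkpoints are addressed by ISO 639-3 codes, while the rest
  # of the pipeline uses two-letter ISO 639-1 codes, hence this mapping.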
  vits_language_map = {
      'en':'eng',
      'ru':'rus',
      'uk':'ukr',
      'pl':'pol'
  }

  # Select model
  selected_model = None

  if 'vits' in speaker_model.lower() or target_language == 'uk':
    selected_model = f'tts_models/{vits_language_map[target_language]}/fairseq/vits'
  else:
    selected_model = 'tts_models/multilingual/multi-dataset/xtts_v2'

  print(f'[Selected TTS model]: {selected_model}')


  tts = None
  final_audio_track = None

  try:
    # TODO uncomment when https://github.com/coqui-ai/TTS/issues/3224 is resolved
    # tts = TTS(selected_model).to(device)

    # Generate one clip per translated segment, in timeline order

    last_end_time = 0
    clips = []

    # Generate sentences
    for speech_item in translated_transcription:

      speech_item_duration = speech_item['end'] - speech_item['start']

      # Insert silence to preserve the gap before this phrase
      gap_duration = speech_item['start'] - last_end_time
      if gap_duration > 0:
        silent_audio = np.zeros((int(44100 * gap_duration), 2))
        silent_clip = AudioArrayClip(silent_audio, fps=44100)
        clips.append(silent_clip)
        print(f"\nAdded silence: Start={last_end_time}, Duration={gap_duration}")

      # Generate speech
      print(f"[{speech_item['speaker']}]")
      tts = TTS(selected_model).to(device)
      audio = tts.tts_with_vc(text=speech_item['text'], speaker_wav=speakers_voice_clips[speech_item['speaker']], language=target_language)
      sample_rate = tts.voice_converter.vc_config.audio.output_sample_rate

      # Adjust pace to fit the speech timeframe if translated audio is longer than phrase
      audio_duration = len(audio) / sample_rate
      if speech_item_duration < audio_duration:
        audio = adjust_voice_pace(audio, sample_rate, speech_item_duration)

      # Resample to 44.1 kHz so every clip matches the AudioArrayClip fps
      new_sample_rate = 44100
      audio = librosa.resample(audio, orig_sr=sample_rate, target_sr=new_sample_rate)

      # Transform to AudioArrayClip object
      audio = np.expand_dims(audio, axis=1)
      audio_stereo = np.repeat(audio, 2, axis=1)
      audio_clip = AudioArrayClip(audio_stereo, fps=44100)

      # Cut out a possible glitch at the AudioArrayClip end (skip very short clips)
      if audio_clip.duration > 0.2:
        audio_clip = audio_clip.subclip(0, audio_clip.duration - 0.2)
      clips.append(audio_clip)
      print(f"Added speech: Start={speech_item['start']}, Final duration={audio_clip.duration}, Original duration={speech_item_duration}")

      last_end_time = speech_item['start'] + audio_clip.duration

      # Keep the name bound (rather than del) so the except block can still reference it
      tts = None; gc.collect(); torch.cuda.empty_cache()

    # Merge sentences
    final_audio_track = mp.concatenate_audioclips(clips)

    audio_files_directory = os.path.dirname(audio_path)
    final_audio_track.write_audiofile(os.path.join(audio_files_directory, "translated_voice_track.wav"), fps=44100)

  except Exception:
    # Release any loaded model before re-raising so GPU memory is not leaked
    tts = None
    gc.collect(); torch.cuda.empty_cache()
    raise

  return final_audio_track


def dub_video(video_path, translated_audio_track, target_language):
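  # Trim the video to the dubbed track's length, duck the original audio to
  # 20% volume, and lay the translated track on top of it.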
  video = mp.VideoFileClip(video_path)
  video = video.subclip(0, translated_audio_track.duration)
  original_audio = video.audio.volumex(0.2)
  dubbed_audio = mp.CompositeAudioClip([original_audio, translated_audio_track.set_start(0)])
  video_with_dubbing = video.set_audio(dubbed_audio)

  video_with_dubbing_path = os.path.splitext(video_path)[0] + "_" + target_language + ".mp4"
  video_with_dubbing.write_videofile(video_with_dubbing_path)

  return video_with_dubbing_path


# Perform video translation
def video_translation(video_path, target_language, speaker_model, hf_token, deepl_token):

  original_audio_path = extract_audio(video_path)

  transcription = speech_diarization(original_audio_path, hf_token)

  translated_transcription = translate_transcript(transcription, target_language, deepl_token)

  speakers_voice_clips = speaker_voice_clips(transcription, original_audio_path)

  translated_audio_track = voice_cloning_translation(translated_transcription, speakers_voice_clips, target_language, speaker_model, original_audio_path)

  video_with_dubbing = dub_video(video_path, translated_audio_track, target_language)

  return video_with_dubbing

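# Gradio passes every input component's value to the callback, including the
# Markdown separators; the underscore parameters absorb those placeholders.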
def translate_video(_, video_path, __, youtube_link, ___, target_language, speaker_model):
    try:
      if not video_path and not youtube_link:
        gr.Warning("You should either upload a video or enter a YouTube link")
        return None
      if youtube_link:
        video_path = download_youtube_video(youtube_link)
      dubbed_video = video_translation(video_path, target_language, speaker_model, HF_TOKEN, DEEPL_TOKEN)
    except Exception as e:
      print(f"An error occurred: {e}")
      return None
    return gr.components.Video(dubbed_video)


inputs = [
    gr.Markdown("## Currently supported languages are: English, Polish, Ukrainian and Russian"),
    gr.Video(label="Upload a video file"),
    gr.Markdown("**OR**"),
    gr.Textbox(label="Paste YouTube link"),
    gr.Markdown("---"),
    gr.Dropdown(["en", "pl", "uk", "ru"], value="pl", label="Select translation target language"),
    gr.Dropdown(["(Recommended) XTTS_V2", "VITs (will be default for Ukrainian)"], value="(Recommended) XTTS_V2", label="Select text-to-speech generation model")
]

outputs = gr.Video(label="Translated video")

gr.Interface(fn=translate_video, 
             inputs=inputs, 
             outputs=outputs, 
             title="🌐AI Video Translation",
             theme=gr.themes.Base()
             ).launch(show_error=True, debug=True)