import gradio as gr
import numpy as np
import torch
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
speaker_embedding_path = "./speaker_embedding.npy"
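
# Character replacements applied to transcripts before TTS: transliterate Danish and
# other non-ASCII letters to ASCII and strip stray punctuation, since the fine-tuned
# checkpoint presumably expects normalized text ("&" becomes "og", Danish for "and").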
replacements = [
("&", "og"),
("\r", " "),
("´", ""),
("\\", ""),
("¨", " "),
("Å", "AA"),
("Æ", "AE"),
("É", "E"),
("Ö", "OE"),
("Ø", "OE"),
("á", "a"),
("ä", "ae"),
("å", "aa"),
("è", "e"),
("î", "i"),
("ô", "oe"),
("ö", "oe"),
("ø", "oe"),
("ü", "y"),
]
def replace_danish_letters(text):
    for src, dst in replacements:
        text = text.replace(src, dst)
    return text
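# Example, given the table above: replace_danish_letters("Søren & Åse") -> "Soeren og AAse"
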
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# load automatic speech recognition checkpoint
asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
# load text-to-speech checkpoint and speaker embeddings
processor = SpeechT5Processor.from_pretrained("JackismyShephard/speecht5_tts-finetuned-nst-da")
model = SpeechT5ForTextToSpeech.from_pretrained("JackismyShephard/speecht5_tts-finetuned-nst-da").to(device)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
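
# SpeechT5 conditions generation on a speaker embedding (a 512-dimensional x-vector);
# the .npy file is assumed to hold a single precomputed embedding, and unsqueeze(0)
# adds the batch dimension generate_speech expects.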
speaker_embedding = np.load(speaker_embedding_path)
speaker_embeddings = torch.tensor(speaker_embedding).unsqueeze(0)
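
# Despite the name, this runs Whisper in transcription mode with the language pinned
# to Danish, so the output is a Danish transcript rather than an English translation.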
def translate(audio):
    outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "da"})
    return outputs["text"]
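
# Text -> spectrogram with SpeechT5, then waveform with the HiFi-GAN vocoder;
# generate_speech returns a 1-D float tensor sampled at 16 kHz.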
def synthesise(text):
    inputs = processor(text=text, return_tensors="pt")
    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
    return speech.cpu()
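
# Full cascade: ASR -> text normalization -> TTS. Gradio's numpy audio output expects
# a (sample_rate, data) tuple, so the float waveform is rescaled to the int16 range.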
def speech_to_speech_translation(audio):
    translated_text = translate(audio)
    translated_text = replace_danish_letters(translated_text)
    print(translated_text)
    synthesised_speech = synthesise(translated_text)
    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
    return 16000, synthesised_speech

title = "Cascaded STST"
description = """
Demo for cascaded speech-to-speech translation (STST), mapping from source speech to target speech in Danish. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for Danish speech recognition, and Microsoft's
[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model fine-tuned on Danish by [JackismyShephard](https://huggingface.co/JackismyShephard) for text-to-speech:
![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation")
"""
demo = gr.Blocks()
mic_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(sources=["microphone"], type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    title=title,
    description=description,
)

file_translate = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(sources=["upload"], type="filepath"),
    outputs=gr.Audio(label="Generated Speech", type="numpy"),
    examples=[["./example.wav"]],
    title=title,
    description=description,
)

with demo:
    gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])
demo.launch()