import gradio as gr
import numpy as np
from pydub import AudioSegment
from transformers import pipeline, M2M100ForConditionalGeneration
from tokenization_small100 import SMALL100Tokenizer
# Load the pipeline for speech recognition
pipe = pipeline(
    "automatic-speech-recognition",
    model="DrishtiSharma/whisper-large-v2-hausa",
    tokenizer="DrishtiSharma/whisper-large-v2-hausa"
)
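
# Note: the ASR pipeline also accepts a plain filepath, e.g. pipe("sample.wav")
# ("sample.wav" is a placeholder, not part of this app). Here the audio is
# decoded with pydub instead, so it can be downmixed to mono first.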

# Load the SMALL-100 translation model and tokenizer
model_name = 'alirezamsh/small100'
model = M2M100ForConditionalGeneration.from_pretrained(model_name)
tokenizer = SMALL100Tokenizer.from_pretrained(model_name)

# Load the text-to-speech pipeline for the English output
tts = pipeline("text-to-speech", model="Baghdad99/english_voice_tts")
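
# SMALL-100 is target-language driven: set tokenizer.tgt_lang and pass the
# source text as-is, with no instruction prefix. A minimal sketch (the Hausa
# phrase below is an illustrative input, not from this app):
#   tokenizer.tgt_lang = "en"
#   batch = tokenizer("Sannu da zuwa", return_tensors="pt")
#   print(tokenizer.decode(model.generate(**batch)[0], skip_special_tokens=True))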

# Define the function that runs speech recognition, translation, and synthesis
def translate_speech(audio_file):
    print(f"Type of audio: {type(audio_file)}, Value of audio: {audio_file}")  # Debug line

    # Load the audio file with pydub; from_file infers the format (wav, mp3, ...)
    audio = AudioSegment.from_file(audio_file)

    # Downmix to mono and get the raw samples
    audio = audio.set_channels(1)
    audio_data = np.array(audio.get_array_of_samples())

    # Normalise the samples to float32 in [-1, 1], as the ASR pipeline expects
    audio_data = audio_data.astype(np.float32) / (1 << (8 * audio.sample_width - 1))

    # Transcribe, passing the true sampling rate alongside the raw samples
    output = pipe({"raw": audio_data, "sampling_rate": audio.frame_rate})
    print(f"Output: {output}")  # Debug line
    # Check that the transcription output contains 'text'
    if 'text' in output:
        transcription = output["text"]
    else:
        print("The output does not contain 'text'")
        return
    # Translate the transcription with SMALL-100: set the target language on
    # the tokenizer and encode the source text directly (no instruction prefix)
    tokenizer.tgt_lang = "en"
    encoded_text = tokenizer(transcription, return_tensors="pt")
    outputs = model.generate(**encoded_text)

    # Decode the generated tokens into the translated text
    translated_text_str = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Synthesise the translated text with the text-to-speech pipeline
    synthesised_speech = tts(translated_text_str)

    # Check that the synthesised speech contains 'audio'
    if 'audio' in synthesised_speech:
        synthesised_speech_data = synthesised_speech['audio']
    else:
        print("The synthesised speech does not contain 'audio'")
        return
    # Flatten the audio data and scale it to the int16 range expected by Gradio
    synthesised_speech_data = synthesised_speech_data.flatten()
    audio_int16 = (synthesised_speech_data * 32767).astype(np.int16)

    # Return the sampling rate reported by the TTS pipeline with the samples
    return synthesised_speech["sampling_rate"], audio_int16
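
# Quick local test (a sketch; "recording.wav" is a hypothetical path):
#   sr, samples = translate_speech("recording.wav")
#   print(sr, samples.shape)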

# Define the Gradio interface (gr.inputs/gr.outputs are deprecated; pass components directly)
iface = gr.Interface(
    fn=translate_speech,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Audio(),  # receives the (sampling_rate, int16 array) tuple returned above
    title="Hausa to English Translation",
    description="Real-time demo for Hausa-to-English translation using speech recognition and text-to-speech synthesis."
)

iface.launch()
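
# When running outside a hosted Space, a temporary public URL can be obtained
# with the standard Gradio option: iface.launch(share=True)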