Update app.py
app.py CHANGED
@@ -1,20 +1,17 @@
+import torch  # Add this line
 import gradio as gr
-from transformers import pipeline, AutoTokenizer
-from huggingsound import SpeechRecognitionModel
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, pipeline, AutoTokenizer
 import numpy as np
 import soundfile as sf
 import tempfile

-
-
-
+# Load the models and processors
+asr_model = Wav2Vec2ForCTC.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
+asr_processor = Wav2Vec2Processor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
 translator = pipeline("text2text-generation", model="dammyogt/damilola-finetuned-NLP-opus-mt-en-ha")
 tts = pipeline("text-to-speech", model="Baghdad99/hausa_voice_tts")

-# Define the function to translate speech
 def translate_speech(audio_data_tuple):
-    print(f"Type of audio: {type(audio_data_tuple)}, Value of audio: {audio_data_tuple}")  # Debug line
-
     # Extract the audio data from the tuple
     sample_rate, audio_data = audio_data_tuple

@@ -22,29 +19,34 @@ def translate_speech(audio_data_tuple):
     with tempfile.NamedTemporaryFile(suffix=".wav", delete=True) as temp_audio_file:
         sf.write(temp_audio_file.name, audio_data, sample_rate)

-        #
-
-
-
-
-
+        # Prepare the input dictionary
+        input_dict = asr_processor(temp_audio_file.name, return_tensors="pt", padding=True)
+
+        # Use the ASR model to get the logits
+        logits = asr_model(input_dict.input_values.to("cpu")).logits
+
+        # Get the predicted IDs
+        pred_ids = torch.argmax(logits, dim=-1)[0]
+
+        # Decode the predicted IDs to get the transcription
+        transcription = asr_processor.decode(pred_ids)
+        print(f"Transcription: {transcription}")  # Print the transcription

-    for transcription in transcriptions:
     # Use the translation pipeline to translate the transcription
     translated_text = translator(transcription, return_tensors="pt")
-    print(f"Translated text: {translated_text}")  # Print the translated text
+    print(f"Translated text: {translated_text}")  # Print the translated text

     # Check if the translated text contains 'generated_token_ids'
     if 'generated_token_ids' in translated_text[0]:
         # Decode the tokens into text
         translated_text_str = translator.tokenizer.decode(translated_text[0]['generated_token_ids'])
+        print(f"Translated text string: {translated_text_str}")  # Print the translated text string
     else:
         print("The translated text does not contain 'generated_token_ids'")
         return

     # Use the text-to-speech pipeline to synthesize the translated text
     synthesised_speech = tts(translated_text_str)
-    print(f"Synthesised speech: {synthesised_speech}")  # Print the synthesised speech to see what it contains

     # Check if the synthesised speech contains 'audio'
     if 'audio' in synthesised_speech: