Update app.py
app.py
CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+from gradio_client import Client
 import requests
 import soundfile as sf
 import numpy as np
@@ -7,11 +8,13 @@ from pydub import AudioSegment
 import io
 
 # Define the Hugging Face Inference API URLs and headers
-ASR_API_URL = "https://api-inference.huggingface.co/models/Baghdad99/saad-speech-recognition-hausa-audio-to-text"
 TTS_API_URL = "https://api-inference.huggingface.co/models/Baghdad99/english_voice_tts"
 TRANSLATION_API_URL = "https://api-inference.huggingface.co/models/Baghdad99/saad-hausa-text-to-english-text"
 headers = {"Authorization": "Bearer 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"}
 
+# Define the Gradio client
+client = Client("https://baghdad99-baghdad99-saad-speech-recognition-haus-28684af.hf.space/--replicas/22eapbdds/")
+
 # Define the function to query the Hugging Face Inference API
 def query(api_url, payload):
     response = requests.post(api_url, headers=headers, json=payload)
@@ -22,17 +25,8 @@ def translate_speech(audio_file):
     print(f"Type of audio: {type(audio_file)}, Value of audio: {audio_file}")  # Debug line
 
     # Use the ASR pipeline to transcribe the audio
-
-
-    response = requests.post(ASR_API_URL, headers=headers, data=data)
-    output = response.json()
-
-    # Check if the output contains 'text'
-    if 'text' in output:
-        transcription = output["text"]
-    else:
-        print("The output does not contain 'text'")
-        return
+    result = client.predict(audio_file.name, api_name="/predict")  # Change this line
+    transcription = result["text"]
 
     # Use the translation pipeline to translate the transcription
     translated_text = query(TRANSLATION_API_URL, {"inputs": transcription})