import gradio as gr
from gtts import gTTS
from pydub import AudioSegment
from pydub.generators import Sine
import os
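# Note: pydub relies on ffmpeg (or libav) being installed for MP3 import/export; WAV works without it.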
# Cloze questions: each entry holds the sentence with a [BLANK] placeholder,
# the expected answer, and the base form of the verb used as a hint.
cloze_questions = [
    {
        "question": "Alex [BLANK] for his daring spirit and his love for exploring the rugged landscapes around Echo Ridge.",
        "answer": "was known",
        "hint": "know"
    },
    {
        "question": "One day, while exploring the local library, Alex [BLANK] upon an ancient map tucked inside a forgotten book on village lore.",
        "answer": "stumbled",
        "hint": "stumble"
    },
    {
        "question": "The map [BLANK] at the location of a lost treasure, hidden deep within a cave known as Whispering Hollow.",
        "answer": "hinted",
        "hint": "hint"
    },
    {
        "question": "Excited by the prospect of a real adventure, Alex [BLANK] to seek out the treasure.",
        "answer": "decided",
        "hint": "decide"
    },
    {
        "question": "Knowing the journey would be risky, he [BLANK] the help of his best friends, Mia and Sam.",
        "answer": "enlisted",
        "hint": "enlist"
    },
    {
        "question": "Together, they [BLANK] for the expedition, gathering supplies and studying the map extensively.",
        "answer": "prepared",
        "hint": "prepare"
    },
    {
        "question": "They [BLANK] their route, took note of landmarks, and readied themselves for any challenges they might face.",
        "answer": "planned",
        "hint": "plan"
    }
]
# Function to create a bell sound
def create_bell_sound(filename="bell.wav"):
    # Generate a bell sound (1000 Hz tone for 200 ms)
    tone = Sine(1000).to_audio_segment(duration=200).apply_gain(-10).fade_in(50).fade_out(50)
    tone.export(filename, format="wav")

# Create the bell sound file
create_bell_sound()
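# The beep generated above is spliced into the cloze audio below to mark where the missing verb goes.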
# Function to convert a cloze sentence to speech, replacing [BLANK] with a bell and a pause
def text_to_speech_with_bell(text, filename):
    # Split the sentence around the blank
    blank_position = text.find("[BLANK]")
    part1 = text[:blank_position]
    part2 = text[blank_position + len("[BLANK]"):]
    # Synthesize the two halves separately
    gTTS(part1).save("part1.mp3")
    gTTS(part2).save("part2.mp3")
    speech_part1 = AudioSegment.from_mp3("part1.mp3")
    speech_part2 = AudioSegment.from_mp3("part2.mp3")
    # Load the bell sound and create a silent segment (2 seconds) for thinking time
    bell = AudioSegment.from_wav("bell.wav")
    silent_segment = AudioSegment.silent(duration=2000)
    # Stitch the pieces together: first half + bell + pause + second half
    combined = speech_part1 + bell + silent_segment + speech_part2
    # Save the final audio and clean up the temporary files
    combined.export(filename, format="mp3")
    os.remove("part1.mp3")
    os.remove("part2.mp3")
    return filename
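# Caveat: gTTS raises an error on empty text, so the split above assumes [BLANK] never
# starts or ends a sentence (which holds for all of the questions defined above).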
# Function to convert text to speech without any modifications
def text_to_speech(text, filename):
    tts = gTTS(text)
    tts.save(filename)
    return filename
# Generate audio files for all questions at startup
for i, question in enumerate(cloze_questions):
    full_audio_filename = f"full_question_{i+1}.mp3"
    cloze_audio_filename = f"cloze_question_{i+1}.mp3"
    # Full sentence with the answer filled in, plus the cloze version with the bell
    question["full_audio"] = text_to_speech(question["question"].replace("[BLANK]", question["answer"]), full_audio_filename)
    question["cloze_audio"] = text_to_speech_with_bell(question["question"], cloze_audio_filename)
# Function to handle the cloze quiz: grade the answer and move to the next question
def cloze_quiz(state, answer):
    name, question_index, score, results = state
    question = cloze_questions[question_index]
    correct = answer.strip().lower() == question["answer"].lower()
    if correct:
        score += 1
        results.append(f"Question {question_index + 1}: Correct\n")
    else:
        results.append(f"Question {question_index + 1}: Incorrect, the correct answer is: {question['answer']}\n")
    question_index += 1
    if question_index < len(cloze_questions):
        # Load the next question's audio and hint, and keep the answer box visible
        next_full_audio = cloze_questions[question_index]["full_audio"]
        next_cloze_audio = cloze_questions[question_index]["cloze_audio"]
        next_hint = f"Hint: {cloze_questions[question_index]['hint']} ◁◁◁ Check out this verb!"
        return (name, question_index, score, results), next_full_audio, next_cloze_audio, next_hint, gr.update(visible=False), gr.update(value="", interactive=True, visible=True)
    else:
        # Quiz finished: show the score summary and hide the answer box
        result_text = f"* Name: {name}\n* Score: {score} out of {len(cloze_questions)}\n" + "\n".join(results)
        return (name, question_index, score, results), None, None, "", gr.update(visible=True, value=result_text), gr.update(visible=False)
# Function to start the quiz
def start_quiz(name):
    hint = f"Hint: {cloze_questions[0]['hint']} ◁◁◁ Check out this verb!"
    return (name, 0, 0, []), cloze_questions[0]["full_audio"], cloze_questions[0]["cloze_audio"], hint, gr.update(visible=False), gr.update(visible=True)
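# The state tuple is stored in gr.State below, which Gradio keeps per browser session,
# so each learner's progress is tracked independently.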
# Create the Gradio interface
with gr.Blocks() as iface:
    gr.Markdown("# Listening Cloze Test Instructions")
    gr.Markdown("""
**Instructions:**

1. Listen to the full sentence.
2. Listen to the cloze question with the beep sound.
3. Write the correct form of the verb you hear in the blank space. Remember, you do not need to write the entire sentence, just the verb.

**For example, if you hear "Yesterday, Alex went to the store," and then you hear "Yesterday, Alex ___ to the store" with a beep sound, you should write "went" if that is the verb you heard.**

**Are you ready? Let's begin!**
""")
    name_input = gr.Textbox(label="Enter your name")
    start_button = gr.Button("Start Quiz")
    full_question_audio = gr.Audio(interactive=True, autoplay=False, label="Full Sentence Listening")
    cloze_question_audio = gr.Audio(interactive=True, autoplay=False, label="Cloze Question")
    hint_output = gr.Markdown()
    answer_input = gr.Textbox(label="Your Answer")
    next_button = gr.Button("Next")
    result_output = gr.Textbox(label="Results", interactive=False, visible=False)

    # Initialize the state
    state = gr.State()

    # Wire up the buttons: Start loads the first question, Next grades the answer and advances
    start_button.click(start_quiz, inputs=name_input, outputs=[state, full_question_audio, cloze_question_audio, hint_output, result_output, answer_input])
    next_button.click(cloze_quiz, inputs=[state, answer_input], outputs=[state, full_question_audio, cloze_question_audio, hint_output, result_output, answer_input])

iface.launch(share=True)
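# share=True asks Gradio to create a temporary public URL in addition to the local server;
# drop the argument to keep the app local-only.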