import time

import gradio as gr

# transcribe_and_respond(audio, web_search) is assumed to be defined earlier
# in this file: it transcribes the recorded clip and returns the AI response.

with gr.Blocks() as demo:
    with gr.Row():
        web_search = gr.Checkbox(label="Web Search", value=False)

    # Gradio 4.x uses `sources=[...]`; older 3.x releases used `source=...`.
    input_audio = gr.Audio(sources=["microphone"], type="filepath", streaming=True)
    output_audio = gr.Audio(label="AI Response", autoplay=True)

    # Per-session state: whether we are recording, and when the last
    # interaction happened. State must flow through inputs/outputs --
    # mutating `.value` inside a callback does not persist per session.
    is_recording = gr.State(False)
    last_interaction_time = gr.State(time.time())

    def toggle_recording(is_rec):
        return not is_rec

    def process_audio(audio, web_search, is_rec, last_time):
        current_time = time.time()
        # Only respond while recording, and only if more than 2 seconds have
        # passed since the last interaction (a simple debounce).
        if is_rec and (current_time - last_time > 2):
            return transcribe_and_respond(audio, web_search), False, current_time
        return None, is_rec, last_time

    input_audio.stream(
        process_audio,
        inputs=[input_audio, web_search, is_recording, last_interaction_time],
        outputs=[output_audio, is_recording, last_interaction_time],
    )

    # Start in recording mode as soon as the page loads.
    demo.load(toggle_recording, inputs=[is_recording], outputs=[is_recording])

    # Load the silence-detection JavaScript file.
    demo.load(
        None,
        js="""
        async () => {
            try {
                const script = document.createElement('script');
                script.src = 'silence_detection.js';
                document.head.appendChild(script);
                console.log('Silence detection script loaded successfully');
            } catch (error) {
                console.error('Error loading silence detection script:', error);
            }
        }
        """,
    )

if __name__ == "__main__":
    demo.queue(max_size=200).launch()
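The contents of `silence_detection.js` are not shown here. As a minimal sketch of what such a script might do, the snippet below monitors microphone level with the Web Audio API and flags silence when the RMS amplitude stays under a threshold for 2 seconds (matching the debounce window in `process_audio`). The threshold, polling interval, and logging are all illustrative assumptions, not the actual file:

```javascript
// Hypothetical sketch of silence_detection.js -- the real file is not shown.
// Monitors the microphone and logs when ~2 s of continuous silence is detected.
(async () => {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const ctx = new AudioContext();
  const analyser = ctx.createAnalyser();
  analyser.fftSize = 2048;
  ctx.createMediaStreamSource(stream).connect(analyser);

  const buf = new Float32Array(analyser.fftSize);
  const SILENCE_RMS = 0.01;  // assumed amplitude threshold
  const SILENCE_MS = 2000;   // matches the 2 s window in process_audio
  let silentSince = null;

  setInterval(() => {
    analyser.getFloatTimeDomainData(buf);
    const rms = Math.sqrt(buf.reduce((sum, x) => sum + x * x, 0) / buf.length);
    if (rms < SILENCE_RMS) {
      silentSince = silentSince ?? Date.now();
      if (Date.now() - silentSince >= SILENCE_MS) {
        console.log('Silence detected');
        silentSince = null;
        // Here one could, e.g., click Gradio's stop-recording button to
        // end the turn and trigger the response.
      }
    } else {
      silentSince = null; // speech resumed; reset the silence timer
    }
  }, 100);
})();
```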