import gradio as gr

from manipulate_model.utils import get_config_and_model

# Load the manipulation-detection config and model once at startup.
manipulate_config, manipulate_model = get_config_and_model()


def process(filepath):
    # Placeholder inference step: currently echoes the uploaded file path back as the result.
    return filepath


demo = gr.Blocks()

file_proc = gr.Interface(
    fn=process,
    inputs=[
        gr.Audio(
            sources=["microphone", "upload"],
            type="filepath",
            show_download_button=True,
            label="Speech file (<30s)",
            max_length=30,
        ),
    ],
    outputs="text",
    title="Find the manipulation: Analyze 'Real' or 'Manipulated' audio.",
    description=(
        "Analyze, detect, and localize manipulation in an audio clip with a click of a button. "
        "Upload a .wav or .flac file."
    ),
    cache_examples=True,
    allow_flagging="never",
)

with demo:
    gr.TabbedInterface([file_proc], ["Find Audio Manipulation"])

demo.queue(max_size=10)
demo.launch(share=True)