import gradio as gr

from manipulate_model.utils import get_config_and_model, infere

# Load the manipulation-detection model and its config once at startup
manipulate_config, manipulate_model = get_config_and_model()
manipulate_model.eval()


def process(filepath):
    # Run inference on the uploaded or recorded audio file and return the raw scores as text
    out = infere(manipulate_model, filepath, manipulate_config)
    out = out.tolist()
    return str(out)


demo = gr.Blocks()

file_proc = gr.Interface(
    fn=process,
    inputs=[
        gr.Audio(
            sources=["microphone", "upload"],
            type="filepath",
            show_download_button=True,
            label="Speech file (<30s)",
            max_length=30,
        ),
    ],
    outputs="text",
    title="Find the manipulation: Analyze 'Real' or 'Manipulated' audio.",
    description=(
        "Analyze, detect and localize manipulation in an audio clip with a click of a button. Upload a .wav or .flac file."
    ),
    examples=[
        ["samples/fake_audio.wav"],
        ["samples/real_audio.wav"],
    ],
    cache_examples=True,
    allow_flagging="never",
)

with demo:
    gr.TabbedInterface([file_proc], ["Find Audio Manipulation"])

demo.queue(max_size=10)
demo.launch(share=True)