# Magnifying_Lens / app.py
import json
import gradio as gr
import numpy as np
import matplotlib.pyplot as plt
from manipulate_model.utils import get_config_and_model, infere
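# Load the detection config and model once at import time so every Gradio
# request reuses the same weights; the model is switched to eval mode below.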
manipulate_config, manipulate_model = get_config_and_model()
manipulate_model.eval()
def process(filepath):
    global manipulate_model
    global manipulate_config

    # Run frame-wise manipulation detection on the uploaded audio file.
    out = infere(manipulate_model, filepath, manipulate_config)
    out = out.tolist()

    # Earlier plot-based output, kept for reference:
    # plt.clf()
    # plt.figure()
    # plt.plot(out)
    # out_masked = np.ma.masked_less_equal(out, 0.4)
    # plt.plot(out_masked, 'r', linewidth=2)
    # return str(out), plt

    # Return the per-frame decision scores as a JSON-formatted string.
    output_json = {}
    output_json["decision_scores"] = str(out)
    response_text = json.dumps(output_json, indent=4)
    return response_text
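# Build the Gradio UI: a single file-upload Interface, wrapped in a tabbed
# Blocks app and served with a small request queue.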
demo = gr.Blocks()
file_proc = gr.Interface(
    fn=process,
    inputs=[
        # gr.Audio(sources=["microphone", "upload"], type="filepath", show_download_button=True, label="Speech file (<30s)", max_length=30),
        gr.Audio(sources=["upload"], label="Speech file (<30s)", type="filepath")
    ],
    outputs="text",  # gr.Plot(label="Frame wise prediction")
    title="Find the manipulation: Analyze 'Real' or 'Manipulated' audio.",
    description=(
        "Analyze, detect, and localize manipulation in an audio clip with a click of a button. Upload a .wav or .flac file."
    ),
    examples=[
        ["./samples/fake_audio.wav"],
        ["./samples/real_audio.wav"],
    ],
    cache_examples=False,
    allow_flagging="never",
)
with demo:
    gr.TabbedInterface([file_proc], ["Find Audio Manipulation"])
demo.queue(max_size=10)
demo.launch(share=True)
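# A minimal client-side sketch for calling this app programmatically, assuming
# it is deployed as a Gradio/Hugging Face Space. The Space id
# "user/Magnifying_Lens" and the "/predict" endpoint name are illustrative
# placeholders, not confirmed values from this repo:
#
#     from gradio_client import Client, handle_file
#
#     client = Client("user/Magnifying_Lens")
#     result = client.predict(handle_file("./samples/real_audio.wav"), api_name="/predict")
#     print(result)  # JSON string containing "decision_scores"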