Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -2,19 +2,9 @@ import evaluate
 from evaluate.utils import launch_gradio_widget
 import gradio as gr
 from transformers import AutoModelForSequenceClassification, pipeline, RobertaForSequenceClassification, RobertaTokenizer, AutoTokenizer
-import tempfile
 
-tmp = tempfile.NamedTemporaryFile()
-
-
-# Define the list of available models
-available_models = {
-    "mskov/roberta-base-toxicity": "Roberta Finetuned Model"
-}
-
-
 # Create a Gradio interface with audio file and text inputs
-def classify_toxicity(audio_file, text_input, selected_model):
+def classify_toxicity(audio_file, text_input):
     # Transcribe the audio file using Whisper ASR
     if audio_file != None:
         whisper_module = evaluate.load("whisper")
@@ -26,7 +16,7 @@ def classify_toxicity(audio_file, text_input, selected_model):
         transcribed_text = text_input
 
     # Load the selected toxicity classification model
-    toxicity_module = evaluate.load("toxicity",
+    toxicity_module = evaluate.load("toxicity", "mskov/roberta-base-toxicity")
     #toxicity_module = evaluate.load("toxicity", 'DaNLP/da-electra-hatespeech-detection', module_type="measurement")
 
     toxicity_results = toxicity_module.compute(predictions=[transcribed_text])
@@ -39,11 +29,10 @@ def classify_toxicity(audio_file, text_input, selected_model):
 with gr.Blocks() as iface:
     with gr.Column():
         aud_input = gr.Audio(source="upload", type="filepath", label="Upload Audio File")
-    with gr.Row():
         text = gr.Textbox(label="Enter Text", placeholder="Enter text here...")
         submit_btn = gr.Button(label="Submit")
-
+    with g.Column():
         out_text = gr.Textbox()
-    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text
+    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text], outputs=out_text)
 
 iface.launch()
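The substantive change is that the toxicity classifier is now hard-coded to mskov/roberta-base-toxicity instead of being picked from the removed available_models mapping and the dropped selected_model argument. As a reference for how that added line behaves, here is a minimal sketch of the evaluate toxicity measurement loaded with a custom model, mirroring the commented-out DaNLP line; the example transcript string is hypothetical:

import evaluate

# Load the toxicity measurement, pointing it at the classifier named in the commit.
toxicity_module = evaluate.load("toxicity", "mskov/roberta-base-toxicity", module_type="measurement")

# compute() returns a dict with a per-prediction list of toxicity scores.
toxicity_results = toxicity_module.compute(predictions=["example transcript"])
print(toxicity_results["toxicity"])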
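The Space is flagged with a runtime error, and one likely cause is visible in this diff: the added line with g.Column(): refers to a name that is never defined (gradio is imported as gr), so building the Blocks layout raises a NameError at startup. Below is a sketch of the intended layout with that name corrected, assuming a Gradio 3.x environment where gr.Audio(source=...) is still accepted; the classify_toxicity stub only stands in for the real function defined earlier in app.py:

import gradio as gr

def classify_toxicity(audio_file, text_input):
    # Stand-in for the real function in app.py, which transcribes the audio
    # and scores the text with the toxicity measurement.
    return text_input or ""

with gr.Blocks() as iface:
    with gr.Column():
        aud_input = gr.Audio(source="upload", type="filepath", label="Upload Audio File")
        text = gr.Textbox(label="Enter Text", placeholder="Enter text here...")
        submit_btn = gr.Button("Submit")  # Button takes its text as the first (value) argument
    with gr.Column():  # the commit writes g.Column(), which is undefined
        out_text = gr.Textbox()
    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text], outputs=out_text)

iface.launch()

If the error persists after that fix, evaluate.load("whisper") inside classify_toxicity is also worth checking; Whisper transcription is usually run through the transformers pipeline (already imported in this file) rather than an evaluate module.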