mskov committed on
Commit
8cf8567
·
1 Parent(s): f839a1f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -27,13 +27,13 @@ def classify_toxicity(audio_file, text_input, selected_model):
27
 
28
  # Load the selected toxicity classification model
29
  #toxicity_module = evaluate.load("toxicity", selected_model)
30
- toxicity = evaluate.load("toxicity", 'DaNLP/da-electra-hatespeech-detection', module_type="measurement",)
31
 
32
  toxicity_results = toxicity_module.compute(predictions=[transcribed_text])
33
 
34
  toxicity_score = toxicity_results["toxicity"][0]
35
  print(toxicity_score)
36
- return toxicity_score
37
  # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
38
 
39
  iface = gr.Interface(
 
27
 
28
  # Load the selected toxicity classification model
29
  #toxicity_module = evaluate.load("toxicity", selected_model)
30
+ toxicity_module = evaluate.load("toxicity", 'DaNLP/da-electra-hatespeech-detection', module_type="measurement",)
31
 
32
  toxicity_results = toxicity_module.compute(predictions=[transcribed_text])
33
 
34
  toxicity_score = toxicity_results["toxicity"][0]
35
  print(toxicity_score)
36
+ return toxicity_score, transcribed_text
37
  # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
38
 
39
  iface = gr.Interface(