mskov committed
Commit b9a0cdb · 1 Parent(s): 9bae889

Update app.py

Files changed (1):
  1. app.py +8 -10
app.py CHANGED
@@ -40,15 +40,17 @@ class_options = {
 
 pipe = pipeline("automatic-speech-recognition", model="openai/whisper-large")
 
+def classify_emotion():
+    #### Emotion classification ####
+    emotion_classifier = foreign_class(source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP", pymodule_file="custom_interface.py", classname="CustomEncoderWav2vec2Classifier")
+    out_prob, score, index, text_lab = emotion_classifier.classify_file(audio_file)
+    return emo_dict[text_lab[0]]
+
 # Create a Gradio interface with audio file and text inputs
 def classify_toxicity(audio_file, text_input, classify_anxiety):
     # Transcribe the audio file using Whisper ASR
     if audio_file != None:
         transcribed_text = pipe(audio_file)["text"]
-
-        #### Emotion classification ####
-        emotion_classifier = foreign_class(source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP", pymodule_file="custom_interface.py", classname="CustomEncoderWav2vec2Classifier")
-        out_prob, score, index, text_lab = emotion_classifier.classify_file(audio_file)
 
     else:
         transcribed_text = text_input
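
Note that classify_emotion() as added above takes no parameters but reads audio_file and emo_dict from the surrounding scope, and nothing in these hunks passes the audio in or calls the new function. A minimal self-contained sketch of the same idea, assuming the audio arrives as a file path and that emo_dict maps the IEMOCAP short labels to display names (the app's own mapping is defined elsewhere in app.py and is not shown in this diff):

from speechbrain.pretrained.interfaces import foreign_class  # newer SpeechBrain releases expose this as speechbrain.inference.interfaces

# Assumed mapping from IEMOCAP short labels to display names; app.py defines its own.
emo_dict = {"neu": "neutral", "ang": "angry", "hap": "happy", "sad": "sad"}

def classify_emotion(audio_file):
    #### Emotion classification ####
    # wav2vec2 emotion classifier loaded through SpeechBrain's foreign_class helper
    emotion_classifier = foreign_class(
        source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
        pymodule_file="custom_interface.py",
        classname="CustomEncoderWav2vec2Classifier",
    )
    out_prob, score, index, text_lab = emotion_classifier.classify_file(audio_file)
    return emo_dict.get(text_lab[0], text_lab[0])
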
@@ -75,13 +77,9 @@ def classify_toxicity(audio_file, text_input, classify_anxiety):
         # classification_output = classifier(sequence_to_classify, candidate_labels, multi_label=False)
         classification_output = text_classifier(sequence_to_classify, candidate_labels, multi_label=True)
         print(classification_output)
-
-        #### Emotion classification ####
-
-        emotion_classifier = foreign_class(source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP", pymodule_file="custom_interface.py", classname="CustomEncoderWav2vec2Classifier")
-        out_prob, score, index, text_lab = emotion_classifier.classify_file(audio_file)
+
 
-        return toxicity_score, classification_output, emo_dict[text_lab[0]], transcribed_text
+        return toxicity_score, classification_output, transcribed_text
         # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
     else:
         model = whisper.load_model("large")
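
For context on the lines kept just above: with multi_label=True, the zero-shot pipeline scores each candidate label independently instead of softmaxing across labels, and classification_output is a dict with "sequence", "labels", and "scores" keys. A small sketch of that call, assuming a generic NLI checkpoint and placeholder labels (text_classifier, sequence_to_classify, and candidate_labels are all defined earlier in app.py, outside these hunks):

from transformers import pipeline

# Assumptions: the checkpoint and labels below are placeholders, not the app's own choices.
text_classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
candidate_labels = ["calm", "anxious"]
sequence_to_classify = "I feel really on edge today."

classification_output = text_classifier(sequence_to_classify, candidate_labels, multi_label=True)
# classification_output == {"sequence": ..., "labels": [...highest score first...], "scores": [...independent 0-1 scores...]}
print(classification_output["labels"][0], classification_output["scores"][0])
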
 
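With the emotion label dropped from the return statement, classify_toxicity now yields three values (toxicity_score, classification_output, transcribed_text) instead of four, so the Gradio interface wired to it needs exactly three output components. A rough sketch of that wiring, with component types inferred from the function signature (the actual interface definition lives further down in app.py and is not part of this diff):

import gradio as gr

# Assumed component choices; only the three-output shape follows from the new return statement.
iface = gr.Interface(
    fn=classify_toxicity,
    inputs=[
        gr.Audio(type="filepath", label="Audio"),  # forwarded to the Whisper ASR pipeline
        gr.Textbox(label="Text input"),            # fallback when no audio is supplied
        gr.Checkbox(label="Classify anxiety"),     # assumed; could equally be a dropdown over class_options
    ],
    outputs=[
        gr.Number(label="Toxicity score"),
        gr.JSON(label="Zero-shot classification"),
        gr.Textbox(label="Transcription"),
    ],
)

if __name__ == "__main__":
    iface.launch()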