mskov committed
Commit a6b9c5b
1 Parent(s): 3e45c8c

Update app.py

Files changed (1): app.py +26 -2
app.py CHANGED
@@ -6,6 +6,7 @@ from evaluate.utils import launch_gradio_widget
 import gradio as gr
 import torch
 import pandas as pd
+import random
 import classify
 import replace_explitives
 from whisper.model import Whisper
@@ -115,6 +116,7 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
     # plot.update(x=classification_df["labels"], y=classification_df["scores"])
     if toxicity_score > threshold:
         print("threshold exceeded!! Launch intervention")
+        intervention_output(intervention)
 
     return toxicity_score, classification_output, transcribed_text
     # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
@@ -153,8 +155,29 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
     if toxicity_score > threshold:
         print("threshold exceeded!! Launch intervention")
     return classify_anxiety
-
+def intervention_output(intervene):
+    if intervene == "Audio File":
+        gr.Audio.play("calm.wav")
+    elif intervene == "Therapy App":
+        out_img = gr.update(visible=True, value="hrv-breathing.gif")
+    elif intervene == "Text Message":
+        phrase = positive_affirmations()
+        out_text = gr.update(visible=True, value=phrase)
+    else:
+        pass
+
+def positive_affirmations():
+    affirmations = [
+        "I have survived my anxiety before and I will survive again now",
+        "I am not in danger; I am just uncomfortable; this too will pass",
+        "I forgive and release the past and look forward to the future",
+        "I can't control what other people say but I can control my breathing and my response"
+    ]
+    selected_affirm = random.choice(affirmations)
+    return selected_affirm
+
 with gr.Blocks() as iface:
+    intervene_State = gr.State([])
     with gr.Column():
         anxiety_class = gr.Radio(["racism", "LGBTQ+ hate", "sexually explicit", "misophonia"])
         explit_preference = gr.Radio(choices=["N-Word", "B-Word", "All Explitives"], label="Words to omit from general anxiety classes", info="certain words may be acceptable within certain contexts for given groups of people, and some people may be unbothered by expletives broadly speaking.")
@@ -168,7 +191,8 @@ with gr.Blocks() as iface:
     with gr.Column():
         out_val = gr.Textbox()
         out_class = gr.Textbox()
-        out_text = gr.Textbox()
+        out_text = gr.Textbox(visible=False)
+        out_img = gr.Textbox(visible=False)
     submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider, intervention_type], outputs=[out_val, out_class, out_text])
 
 iface.launch()
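
A note on the new intervention logic: in Gradio, a gr.update(...) only takes effect when the event handler registered via .click() returns it, mapped positionally onto the outputs list; assigning it to a local variable inside intervention_output() discards it. gr.Audio.play is also not part of the Gradio API, and out_img is created as a gr.Textbox even though it receives a GIF. A minimal, self-contained sketch of one way this could be wired up (Gradio 3.x-style gr.update; the hidden out_audio component is an assumption, and the asset paths calm.wav / hrv-breathing.gif follow the diff):

import random
import gradio as gr

# Abridged from the commit's list.
AFFIRMATIONS = [
    "I have survived my anxiety before and I will survive again now",
    "I am not in danger; I am just uncomfortable; this too will pass",
]

def intervention_output(intervene):
    # Build one update per intervention component; Gradio applies whatever
    # the handler returns to the components listed in click(outputs=...).
    text_upd = gr.update(visible=False)
    img_upd = gr.update(visible=False)
    audio_upd = gr.update(visible=False)
    if intervene == "Audio File":
        audio_upd = gr.update(visible=True, value="calm.wav")
    elif intervene == "Therapy App":
        img_upd = gr.update(visible=True, value="hrv-breathing.gif")
    elif intervene == "Text Message":
        text_upd = gr.update(visible=True, value=random.choice(AFFIRMATIONS))
    return text_upd, img_upd, audio_upd

with gr.Blocks() as demo:
    intervention_type = gr.Radio(["Audio File", "Therapy App", "Text Message"])
    submit_btn = gr.Button("Intervene")
    out_text = gr.Textbox(visible=False)
    out_img = gr.Image(visible=False)
    out_audio = gr.Audio(visible=False)
    # The three returned updates map positionally onto these outputs:
    submit_btn.click(fn=intervention_output, inputs=intervention_type,
                     outputs=[out_text, out_img, out_audio])

demo.launch()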
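To fold this back into the committed handler, classify_toxicity would return the updates from intervention_output(intervention) alongside its existing values, and out_img (plus any audio component) would be appended to the outputs list in submit_btn.click(...). Note that the diff leaves a collision here: out_text already receives transcribed_text as the third output, yet it is now hidden and is also the target of the affirmation update, so a dedicated affirmation textbox may be cleaner. The new intervene_State = gr.State([]) is not referenced anywhere else in this diff.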