mskov committed
Commit a5a144e · 1 Parent(s): fcac25d

Update app.py

Files changed (1): app.py +6 -19
app.py CHANGED
@@ -116,10 +116,12 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
         # plot.update(x=classification_df["labels"], y=classification_df["scores"])
         if toxicity_score > threshold:
             print("threshold exceeded!! Launch intervention")
-            holder = intervention_output(intervention)
+            affirm = positive_affirmations()
+        else:
+            affirm = ""
 
         print("output column: ", holder)
-        return toxicity_score, classification_output, transcribed_text, holder
+        return toxicity_score, classification_output, transcribed_text, affirm
         # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
     else:
         threshold = slider_logic(slider)
@@ -157,19 +159,6 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
         print("threshold exceeded!! Launch intervention")
         return classify_anxiety
 
-def intervention_output(intervene):
-    if intervene == "Audio File":
-        print("audio updated")
-        return { output_col : gr.update(visible=True), out_aud : gr.update(value="./calm.wav", visible=True, autoplay=True)}
-    elif intervene == "Therapy App":
-        print("therapy app updated")
-        return { output_col : gr.update(visible=True), out_img : gr.update(value="./hrv-breathing.gif", visible=True)}
-    elif intervene == "Text Message":
-        phrase = positive_affirmations()
-        return { output_col : gr.update(visible=True), out_text : gr.update(value=phrase, visible=True)}
-    else:
-        return " "
-
 def positive_affirmations():
     affirmations = [
         "I have survived my anxiety before and I will survive again now",
@@ -195,10 +184,8 @@ with gr.Blocks() as iface:
     with gr.Column():
         out_val = gr.Textbox()
         out_class = gr.Textbox()
-    with gr.Column(visible=False) as output_col:
         out_text = gr.Textbox()
-        out_img = gr.Image(value="./hrv-breathing.gif")
-        out_aud = gr.Audio(value="./calm.wav")
-    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider, intervention_type], outputs=[out_val, out_class, out_text, output_col])
+        out_affirm = gr.Textbox()
+    submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider, intervention_type], outputs=[out_val, out_class, out_text, out_affirm])
 
 iface.launch()
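
For context, this commit drops the dict-of-gr.update() intervention pattern in favor of plain positional return values: classify_toxicity now returns an affirmation string that Gradio routes into the new out_affirm textbox. (Note that the unchanged print("output column: ", holder) context line still references holder, which this commit no longer assigns, so that branch would raise a NameError as committed; the sketch below omits it.) A minimal runnable sketch of the new wiring, assuming a recent Gradio release and using illustrative names (classify, AFFIRMATIONS, demo) rather than the app's real components:

import random

import gradio as gr

AFFIRMATIONS = [
    "I have survived my anxiety before and I will survive again now",
]

def positive_affirmations():
    # Return one affirmation; the real helper draws from a longer list.
    return random.choice(AFFIRMATIONS)

def classify(text, threshold):
    # Placeholder score; the real app computes this with a toxicity model.
    toxicity_score = min(len(text) / 100, 1.0)
    affirm = positive_affirmations() if toxicity_score > threshold else ""
    # Returned values map positionally onto the outputs list below.
    return toxicity_score, affirm

with gr.Blocks() as demo:
    text_in = gr.Textbox(label="Text")
    sense_slider = gr.Slider(0, 1, value=0.5, label="Sensitivity")
    out_val = gr.Textbox(label="Toxicity score")
    out_affirm = gr.Textbox(label="Affirmation")
    submit_btn = gr.Button("Submit")
    submit_btn.click(fn=classify, inputs=[text_in, sense_slider],
                     outputs=[out_val, out_affirm])

demo.launch()

Returning plain values keyed by position is simpler than the removed approach of returning a dict of gr.update() calls against components such as output_col, which forced every branch of intervention_output to know about the UI layout.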