DrishtiSharma committed on
Commit
619b35a
1 Parent(s): aa1f998

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -12
app.py CHANGED
@@ -22,18 +22,9 @@ def predict_and_ctc_lm_decode(input_file):
22
  speech = load_and_fix_data(input_file, sampling_rate)
23
  transcribed_text = asr(speech, chunk_length_s=5, stride_length_s=1)["text"]
24
  pipe1 = pipeline("sentiment-analysis", model = "finiteautomata/beto-sentiment-analysis")
25
- sentiment = pipe1(transcribed_text)
26
- sentiment={dic["label"]: dic["score"] for dic in sentiment}
27
- pipe2 = pipeline("text-classification", model = "hackathon-pln-es/twitter_sexismo-finetuned-robertuito-exist2021")
28
- sexism_detection = pipe2(transcribed_text)
29
- sexism_detection={dic["label"]: dic["score"] for dic in sexism_detection}
30
- #sexism_detection = np.where(sexism_detection['label']== 0, 'No Sexista', 'Sexista')
31
- pipe3 = pipeline("text-classification", model = "hackathon-pln-es/twitter_sexismo-finetuned-robertuito-exist2021")
32
- harassment_detection = pipe3(transcribed_text)
33
- harassment_detection={dic["label"]: dic["score"] for dic in harassment_detection}
34
- #harassment_detection = np.where(harassment_detection['label']== 0, 'No Harassment', 'Harassment')
35
  return sentiment
36
- #sexism_detection, harassment_detection
37
 
38
  gr.Interface(
39
  predict_and_ctc_lm_decode,
@@ -41,7 +32,7 @@ gr.Interface(
41
  gr.inputs.Audio(source="microphone", type="filepath", label="Record your audio")
42
  ],
43
  #outputs=[gr.outputs.Label(num_top_classes=2),gr.outputs.Label(num_top_classes=2), gr.outputs.Label(num_top_classes=2)],
44
- outputs=[gr.outputs.Label(num_top_classes=2)],
45
  examples=[["audio_test.wav"], ["sample_audio.wav"]],
46
  title="Sentiment Analysis of Spanish Transcribed Audio",
47
  description="This is a Gradio demo for Sentiment Analysis of Transcribed Spanish Audio. First, we do Speech to Text, and then we perform sentiment analysis on the obtained transcription of the input audio.",
 
22
  speech = load_and_fix_data(input_file, sampling_rate)
23
  transcribed_text = asr(speech, chunk_length_s=5, stride_length_s=1)["text"]
24
  pipe1 = pipeline("sentiment-analysis", model = "finiteautomata/beto-sentiment-analysis")
25
+ sentiment = pipe1(transcribed_text)[0]["label"]
 
 
 
 
 
 
 
 
 
26
  return sentiment
27
+
28
 
29
  gr.Interface(
30
  predict_and_ctc_lm_decode,
 
32
  gr.inputs.Audio(source="microphone", type="filepath", label="Record your audio")
33
  ],
34
  #outputs=[gr.outputs.Label(num_top_classes=2),gr.outputs.Label(num_top_classes=2), gr.outputs.Label(num_top_classes=2)],
35
+ outputs=[gr.outputs.Textbox(label="Predicción")],
36
  examples=[["audio_test.wav"], ["sample_audio.wav"]],
37
  title="Sentiment Analysis of Spanish Transcribed Audio",
38
  description="This is a Gradio demo for Sentiment Analysis of Transcribed Spanish Audio. First, we do Speech to Text, and then we perform sentiment analysis on the obtained transcription of the input audio.",