
jfforero committed
Commit 2610b1c · verified · 1 Parent(s): b2497fa

Update app.py

Files changed (1)
  app.py  +14 -20
app.py CHANGED
@@ -37,29 +37,23 @@ def sentiment_vader(sentence):
         overall_sentiment = "Neutral"
     return overall_sentiment
 
-# Function to transcribe audio and perform sentiment analysis
-def transcribe(audio):
-    time.sleep(3)  # Simulate processing delay
-    # In this case, just return a placeholder value
-    return "Transcription not available"
-
-# Function to get predictions for emotion and sentiment
+# Create a combined function that calls both models
 def get_predictions(audio_input):
     emotion_prediction = predict_emotion_from_audio(audio_input)
-    sentiment_prediction = sentiment_vader(transcribe(audio_input))
-    return emotion_prediction, sentiment_prediction
+    transcribe_prediction = transcribe(audio_input)
+    return [emotion_prediction, transcribe_prediction]
 
 # Create the Gradio interface
-interface = gr.Interface(
-    fn=get_predictions,
-    inputs=gr.Audio(label="Input Audio", type="file"),
-    outputs=[
-        gr.Label(label="Emotion Prediction"),
-        gr.Label(label="Sentiment Prediction")
-    ],
-    title="Emotional Machines Test",
-    description="Load an audio file to analyze speech emotion and sentiment."
-)
+with gr.Blocks() as interface:
+    gr.Markdown("Emotional Machines test: Load or Record an audio file to speech emotion analysis")
+    with gr.Tabs():
+        with gr.Tab("Acoustic and Semantic Predictions"):
+            with gr.Row():
+                input_audio = gr.Audio(label="Input Audio", type="filepath")
+                submit_button = gr.Button("Submit")
+                output_labels = [gr.Label(num_top_classes=8), gr.Label(num_top_classes=4)]
+
+            # Set the function to be called when the button is clicked
+            submit_button.click(get_predictions, inputs=input_audio, outputs=output_labels)
 
-# Launch the interface
 interface.launch()
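
For context, below is a minimal, self-contained sketch of the Blocks layout introduced by this commit. The predict_emotion_from_audio and transcribe functions here are hypothetical stubs (the real implementations live elsewhere in app.py and are not part of this diff); they only return label-to-confidence dictionaries so the two gr.Label outputs have something to display.

# Minimal runnable sketch of the committed Blocks layout.
# Assumption: predict_emotion_from_audio and transcribe are stand-in stubs here;
# the real models are defined elsewhere in app.py and are not shown in this diff.
import gradio as gr

def predict_emotion_from_audio(audio_path):
    # Hypothetical stub: a real acoustic model would return per-emotion confidences.
    return {"neutral": 0.5, "happy": 0.3, "sad": 0.2}

def transcribe(audio_path):
    # Hypothetical stub: the committed code expects this to feed the second gr.Label.
    return {"Positive": 0.6, "Neutral": 0.4}

def get_predictions(audio_input):
    # One result per output component, in the same order as output_labels.
    return [predict_emotion_from_audio(audio_input), transcribe(audio_input)]

with gr.Blocks() as interface:
    gr.Markdown("Emotional Machines test: load or record an audio file for speech emotion analysis")
    with gr.Tabs():
        with gr.Tab("Acoustic and Semantic Predictions"):
            with gr.Row():
                input_audio = gr.Audio(label="Input Audio", type="filepath")
                submit_button = gr.Button("Submit")
                output_labels = [gr.Label(num_top_classes=8), gr.Label(num_top_classes=4)]
            # Clicking the button runs get_predictions on the uploaded/recorded file.
            submit_button.click(get_predictions, inputs=input_audio, outputs=output_labels)

if __name__ == "__main__":
    interface.launch()

The move from gr.Interface to gr.Blocks gives explicit control over layout (tabs, rows) and event wiring via submit_button.click, which is what the committed version uses in place of the single fn/inputs/outputs declaration.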