Spaces: jfforero / Runtime error

jfforero committed · Commit f2b7d46 · verified · 1 Parent(s): 81019be

Update app.py

Files changed (1)
  1. app.py +55 -40
app.py CHANGED
@@ -1,12 +1,12 @@
 import gradio as gr
 import numpy as np
 import librosa
+import time
 import requests
 from io import BytesIO
 from PIL import Image
 import os
 from tensorflow.keras.models import load_model
-from faster_whisper import WhisperModel
 
 # Load the emotion prediction model
 def load_emotion_model(model_path):
@@ -17,14 +17,25 @@ def load_emotion_model(model_path):
         print("Error loading emotion prediction model:", e)
         return None
 
+model_path = 'mymodel_SER_LSTM_RAVDESS.h5'
+model = load_emotion_model(model_path)
+
+#####
+
+from faster_whisper import WhisperModel
+
+
 model_size = "small"
-# Run on CPU with INT8 compute
-model = WhisperModel(model_size, device="cpu", compute_type="int8")
 
-# Load emotion prediction model
-model_path = 'mymodel_SER_LSTM_RAVDESS.h5'
-emotion_model = load_emotion_model(model_path)
+# Run Whisper on CPU with INT8 compute
+model2 = WhisperModel(model_size, device="cpu", compute_type="int8")
 
+def transcribe(audio):
+    segments, _ = model2.transcribe(audio, beam_size=5)
+    return "".join([segment.text for segment in segments])
+
+#########
+
 # Function to extract MFCC features from audio
 def extract_mfcc(wav_file_name):
     try:
@@ -44,7 +55,7 @@ def predict_emotion_from_audio(wav_filepath):
     test_point = extract_mfcc(wav_filepath)
     if test_point is not None:
         test_point = np.reshape(test_point, newshape=(1, 40, 1))
-        predictions = emotion_model.predict(test_point)
+        predictions = model.predict(test_point)
         predicted_emotion_label = np.argmax(predictions[0]) + 1
         return emotions[predicted_emotion_label]
     else:
@@ -55,43 +66,47 @@ def predict_emotion_from_audio(wav_filepath):
 
 api_key = os.getenv("DeepAI_api_key")
 
+
 # Predict emotion from audio
 def get_predictions(audio_input):
-    try:
-        audio_data = audio_input.read()  # Read the audio data
-        emotion_prediction = predict_emotion_from_audio(audio_data)
-        image = generate_image(api_key, emotion_prediction)
-        return emotion_prediction, image
-    except Exception as e:
-        print("Error processing audio:", e)
-        return None, None
+    emotion_prediction = predict_emotion_from_audio(audio_input)
+    # Generate image here or call a separate function
+    image = generate_image(api_key, emotion_prediction)
+    return emotion_prediction, image
+
 
 # Define a function to generate an image using DeepAI Text to Image API
 def generate_image(api_key, text):
-    try:
-        url = "https://api.deepai.org/api/text2img"
-        headers = {'api-key': api_key}
-        response = requests.post(
-            url,
-            data={'text': text},
-            headers=headers
-        )
-        response_data = response.json()
-        if 'output_url' in response_data:
-            image_url = response_data['output_url']
-            image_response = requests.get(image_url)
-            image = Image.open(BytesIO(image_response.content))
-            return image
-        else:
-            return None
-    except Exception as e:
-        print("Error generating image:", e)
+    url = "https://api.deepai.org/api/text2img"
+    headers = {'api-key': api_key}
+    response = requests.post(
+        url,
+        data={
+            'text': text,
+        },
+        headers=headers
+    )
+    response_data = response.json()
+    if 'output_url' in response_data:
+        image_url = response_data['output_url']
+        image_response = requests.get(image_url)
+        image = Image.open(BytesIO(image_response.content))
+        return image
+    else:
         return None
-
+####
+
 # Create the Gradio interface
-with gr.Interface(get_predictions,
-                  inputs=gr.inputs.Audio(label="Input Audio", type="file"),
-                  outputs=[gr.outputs.Text(label="Prediction"), gr.outputs.Image(label="Generated Image")],
-                  title="Emotional Machines Test",
-                  description="Load or Record an audio file to perform emotion analysis") as iface:
-    iface.launch()
+with gr.Blocks() as interface:
+    gr.Markdown("Emotional Machines test: Load or Record an audio file for speech emotion analysis")
+    with gr.Tabs():
+        with gr.Tab("Acoustic and Semantic Predictions"):
+            with gr.Row():
+                input_audio = gr.Audio(label="Input Audio", type="filepath")
+                submit_button = gr.Button("Submit")
+                output_label = [gr.Label("Prediction"), gr.Image(type='pil')]  # emotion label and generated image outputs
+
+    # Set the function to be called when the button is clicked
+    submit_button.click(get_predictions, inputs=input_audio, outputs=output_label)
+
+interface.launch()
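
For reference, a minimal usage sketch (not part of the commit) of the two entry points this version exposes. It assumes the functions above are already defined in the current session, that the DeepAI_api_key environment variable was set before app.py ran, and that sample.wav is a hypothetical local test file:

    # Hypothetical local smoke test; sample.wav is a placeholder path, not part of the repo.
    wav_path = "sample.wav"
    emotion, image = get_predictions(wav_path)   # emotion label plus generated PIL image (or None if the API call fails)
    print("Predicted emotion:", emotion)
    print("Transcript:", transcribe(wav_path))   # faster-whisper transcription added in this commit

Note that get_predictions and transcribe both take a filepath, matching the gr.Audio(type="filepath") input used by the interface.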