adeelshuaib committed
Commit c7bf147
Parent: 734f006

Update app.py

Files changed (1)
  1. app.py +32 -56
app.py CHANGED
Old version (lines removed in this commit are prefixed with "-"):

@@ -3,6 +3,7 @@ from transformers import pipeline
 from speechbrain.pretrained import Tacotron2, HIFIGAN, EncoderDecoderASR
 import matplotlib.pyplot as plt
 import pandas as pd

 # Initialize psychometric model
 psych_model_name = "KevSun/Personality_LM"
@@ -13,18 +14,6 @@ asr_model = EncoderDecoderASR.from_hparams(source="speechbrain/asr-crdnn-rnnlm-l
 tts_model = Tacotron2.from_hparams(source="speechbrain/tts-tacotron2-ljspeech", savedir="tmp_tts")
 voc_model = HIFIGAN.from_hparams(source="speechbrain/tts-hifigan-ljspeech", savedir="tmp_voc")

-# Psychometric Test Questions
-text_questions = [
-    "How do you handle criticism?",
-    "Describe a time when you overcame a challenge.",
-    "What motivates you to work hard?"
-]
-
-audio_questions = [
-    "What does teamwork mean to you?",
-    "How do you handle stressful situations?"
-]
-
 # Function to analyze text responses
 def analyze_text_responses(responses):
     analysis = [psych_model(response)[0] for response in responses]
@@ -37,6 +26,7 @@ def generate_audio_question(question):
     waveforms = voc_model.decode_batch(mel_output)
     return waveforms[0].numpy()

 def process_audio_response(audio):
     # Check if the audio input is None
     if audio is None:
@@ -49,63 +39,49 @@ def process_audio_response(audio):
     except Exception as e:
         return f"Error processing audio: {str(e)}"

-# Gradio interface functions
-def text_part(candidate_name, responses):
-    traits = analyze_text_responses(responses)
-    df = pd.DataFrame(traits.items(), columns=["Trait", "Score"])
-    plt.figure(figsize=(8, 6))
-    plt.bar(df["Trait"], df["Score"], color="skyblue")
-    plt.title(f"Psychometric Analysis for {candidate_name}")
-    plt.xlabel("Traits")
-    plt.ylabel("Score")
-    plt.xticks(rotation=45)
-    plt.tight_layout()
-    return df, plt
-
-def audio_part(candidate_name, audio_responses):
-    # Check if any audio response is invalid (None)
-    valid_audio_responses = [process_audio_response(audio) for audio in audio_responses if audio is not None]
-
-    # If all responses are invalid, return an error message
-    if not valid_audio_responses:
-        return "No valid audio responses provided", None

-    traits = analyze_text_responses(valid_audio_responses)
-    df = pd.DataFrame(traits.items(), columns=["Trait", "Score"])
-    plt.figure(figsize=(8, 6))
-    plt.bar(df["Trait"], df["Score"], color="lightcoral")
-    plt.title(f"Audio Psychometric Analysis for {candidate_name}")
-    plt.xlabel("Traits")
-    plt.ylabel("Score")
-    plt.xticks(rotation=45)
-    plt.tight_layout()
-    return df, plt
-
-
-# Gradio UI function
 def chat_interface(candidate_name, *responses):
-    # Separate text responses and audio responses
-    num_text_questions = len(text_questions)
-    text_responses = responses[:num_text_questions]
-    audio_responses = responses[num_text_questions:]

     # Process text responses
-    text_df, text_plot = text_part(candidate_name, text_responses)

     # Process audio responses
-    audio_df, audio_plot = audio_part(candidate_name, audio_responses)

-    return text_df, text_plot, audio_df, audio_plot

 # Create text inputs and audio inputs
-text_inputs = [gr.Textbox(label=f"Response to Q{i+1}: {q}") for i, q in enumerate(text_questions)]
-audio_inputs = [gr.Audio(label=f"Response to Q{i+1}: {q}", type="filepath") for i, q in enumerate(audio_questions)]

 interface = gr.Interface(
     fn=chat_interface,
     inputs=[gr.Textbox(label="Candidate Name")] + text_inputs + audio_inputs,
-    outputs=["dataframe", "plot", "dataframe", "plot"],
-    title="Psychometric Analysis Chatbot"
 )

 # Launch the interface
 
New version (lines added in this commit are prefixed with "+"):

@@ -3,6 +3,7 @@ from transformers import pipeline
 from speechbrain.pretrained import Tacotron2, HIFIGAN, EncoderDecoderASR
 import matplotlib.pyplot as plt
 import pandas as pd
+import random

 # Initialize psychometric model
 psych_model_name = "KevSun/Personality_LM"
@@ -13,18 +14,6 @@ asr_model = EncoderDecoderASR.from_hparams(source="speechbrain/asr-crdnn-rnnlm-l
 tts_model = Tacotron2.from_hparams(source="speechbrain/tts-tacotron2-ljspeech", savedir="tmp_tts")
 voc_model = HIFIGAN.from_hparams(source="speechbrain/tts-hifigan-ljspeech", savedir="tmp_voc")

 # Function to analyze text responses
 def analyze_text_responses(responses):
     analysis = [psych_model(response)[0] for response in responses]
@@ -37,6 +26,7 @@ def generate_audio_question(question):
     waveforms = voc_model.decode_batch(mel_output)
     return waveforms[0].numpy()

+# Function to process audio response
 def process_audio_response(audio):
     # Check if the audio input is None
     if audio is None:
@@ -49,63 +39,49 @@ def process_audio_response(audio):
     except Exception as e:
         return f"Error processing audio: {str(e)}"

+# Function to generate dynamic questions based on answers
+def generate_dynamic_question(previous_answer):
+    # Example of simple follow-up questions based on the answer
+    if "teamwork" in previous_answer.lower():
+        return "Can you share a specific instance where you worked in a team?"
+    elif "challenge" in previous_answer.lower():
+        return "How did you overcome that challenge? What steps did you take?"
+    elif "stress" in previous_answer.lower():
+        return "How do you manage stress during high-pressure situations?"
+    else:
+        # Default follow-up question
+        return "Can you tell me more about that?"

+# Gradio UI function to handle dynamic conversation
 def chat_interface(candidate_name, *responses):
+    conversation_history = []
+
+    # Iterate through responses to generate follow-up questions
+    for i, response in enumerate(responses):
+        conversation_history.append(f"Q{i+1}: {response}")
+
+        # Generate dynamic question based on the previous response
+        dynamic_question = generate_dynamic_question(response)
+        conversation_history.append(f"Follow-up Question: {dynamic_question}")

     # Process text responses
+    text_df, text_plot = text_part(candidate_name, responses)

     # Process audio responses
+    audio_df, audio_plot = audio_part(candidate_name, responses)

+    # Return conversation history and analysis
+    return "\n".join(conversation_history), text_df, text_plot, audio_df, audio_plot

 # Create text inputs and audio inputs
+text_inputs = [gr.Textbox(label=f"Response to Q{i+1}:") for i in range(5)]  # Assuming we have up to 5 text responses
+audio_inputs = [gr.Audio(label=f"Response to Audio Q{i+1}:") for i in range(2)]  # Assuming we have up to 2 audio responses

 interface = gr.Interface(
     fn=chat_interface,
     inputs=[gr.Textbox(label="Candidate Name")] + text_inputs + audio_inputs,
+    outputs=["text", "dataframe", "plot", "dataframe", "plot"],
+    title="Dynamic Psychometric Analysis Chatbot"
 )

 # Launch the interface
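
The updated chat_interface still calls text_part and audio_part, but those helpers are not defined anywhere in the hunks of the new revision shown here (their definitions are among the lines removed above). Below is a minimal sketch of what restoring them could look like, adapted from the deleted definitions; the names and plotting details follow the removed code, while anything beyond that is an assumption rather than part of this commit. It also assumes analyze_text_responses and process_audio_response from app.py are in scope.

# Hypothetical helpers restoring text_part/audio_part so the new chat_interface can run.
# Adapted from the definitions this commit deletes; an illustration, not the committed code.
import matplotlib.pyplot as plt
import pandas as pd

def text_part(candidate_name, responses):
    # Score the written answers and plot the resulting trait scores
    traits = analyze_text_responses(responses)
    df = pd.DataFrame(traits.items(), columns=["Trait", "Score"])
    plt.figure(figsize=(8, 6))
    plt.bar(df["Trait"], df["Score"], color="skyblue")
    plt.title(f"Psychometric Analysis for {candidate_name}")
    plt.xlabel("Traits")
    plt.ylabel("Score")
    plt.xticks(rotation=45)
    plt.tight_layout()
    return df, plt

def audio_part(candidate_name, audio_responses):
    # Transcribe whichever recordings were provided, then reuse the text analysis
    transcripts = [process_audio_response(audio) for audio in audio_responses if audio is not None]
    if not transcripts:
        return "No valid audio responses provided", None
    traits = analyze_text_responses(transcripts)
    df = pd.DataFrame(traits.items(), columns=["Trait", "Score"])
    plt.figure(figsize=(8, 6))
    plt.bar(df["Trait"], df["Score"], color="lightcoral")
    plt.title(f"Audio Psychometric Analysis for {candidate_name}")
    plt.xlabel("Traits")
    plt.ylabel("Score")
    plt.xticks(rotation=45)
    plt.tight_layout()
    return df, plt

With helpers like these in scope, chat_interface can return the conversation transcript plus the two table/plot pairs expected by the declared outputs ["text", "dataframe", "plot", "dataframe", "plot"]. Note that the new chat_interface passes the full responses tuple, text and audio alike, to both helpers, so splitting text inputs from audio inputs (as the removed version did with text_questions and audio_questions) would still be needed in practice, and the file presumably still ends with interface.launch().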