# interview-bot / app.py
import gradio as gr
from transformers import pipeline
from speechbrain.pretrained import Tacotron2, HIFIGAN, EncoderDecoderASR
import matplotlib.pyplot as plt
import pandas as pd
import random
# Initialize psychometric model
psych_model_name = "KevSun/Personality_LM"
psych_model = pipeline("text-classification", model=psych_model_name)
# Initialize ASR and TTS models
asr_model = EncoderDecoderASR.from_hparams(source="speechbrain/asr-crdnn-rnnlm-librispeech", savedir="tmp_asr")
tts_model = Tacotron2.from_hparams(source="speechbrain/tts-tacotron2-ljspeech", savedir="tmp_tts")
voc_model = HIFIGAN.from_hparams(source="speechbrain/tts-hifigan-ljspeech", savedir="tmp_voc")
# Function to analyze text responses
def analyze_text_responses(responses):
    # Run the personality classifier on each response and keep the top label/score
    analysis = [psych_model(response)[0] for response in responses]
    traits = {result["label"]: result["score"] for result in analysis}
    return traits
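# NOTE: `text_part` is called by `chat_interface` below but is not defined in this
# file. The following is a minimal sketch (an assumption, not the original
# implementation) that returns a (DataFrame, matplotlib Figure) pair so it matches
# the "dataframe" and "plot" outputs declared in the Gradio interface.
def text_part(candidate_name, responses):
    responses = [r for r in responses if isinstance(r, str) and r.strip()]
    if not responses:
        return pd.DataFrame(), None
    traits = analyze_text_responses(responses)
    df = pd.DataFrame({"Trait": list(traits.keys()), "Score": list(traits.values())})
    fig, ax = plt.subplots()
    ax.bar(df["Trait"], df["Score"])
    ax.set_ylabel("Score")
    ax.set_title(f"Text-based trait scores for {candidate_name}")
    return df, fig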
# Function to handle TTS
def generate_audio_question(question):
    # Tacotron2 returns the mel spectrogram, its length, and the attention alignment
    mel_output, mel_length, alignment = tts_model.encode_text(question)
    waveforms = voc_model.decode_batch(mel_output)
    return waveforms[0].numpy()
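# Example usage (an assumption, not wired into the original interface): a Gradio
# gr.Audio output expects a (sample_rate, numpy_array) tuple, and the LJSpeech
# Tacotron2/HiFi-GAN models synthesize speech at 22050 Hz.
def generate_audio_question_for_gradio(question):
    waveform = generate_audio_question(question)
    return 22050, waveform.squeeze()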
# Function to process audio response
def process_audio_response(audio):
    # Check if the audio input is None
    if audio is None:
        return "No audio provided"
    # Transcribe the audio file if a valid path was provided
    try:
        text_response = asr_model.transcribe_file(audio)
        return text_response
    except Exception as e:
        return f"Error processing audio: {str(e)}"
# Function to generate dynamic questions based on answers
def generate_dynamic_question(previous_answer):
    # Simple keyword-based follow-up questions
    if "teamwork" in previous_answer.lower():
        return "Can you share a specific instance where you worked in a team?"
    elif "challenge" in previous_answer.lower():
        return "How did you overcome that challenge? What steps did you take?"
    elif "stress" in previous_answer.lower():
        return "How do you manage stress during high-pressure situations?"
    else:
        # Default follow-up question
        return "Can you tell me more about that?"
# Gradio UI function to handle dynamic conversation
def chat_interface(candidate_name, *responses):
    # The first five inputs are text answers; the last two are audio answers
    text_responses = [r for r in responses[:5] if isinstance(r, str) and r.strip()]
    audio_responses = list(responses[5:])
    conversation_history = []
    # Iterate through text answers to generate follow-up questions
    for i, response in enumerate(text_responses):
        conversation_history.append(f"A{i+1}: {response}")
        # Generate a dynamic follow-up question based on the previous answer
        dynamic_question = generate_dynamic_question(response)
        conversation_history.append(f"Follow-up Question: {dynamic_question}")
    # Analyze text responses
    text_df, text_plot = text_part(candidate_name, text_responses)
    # Analyze audio responses
    audio_df, audio_plot = audio_part(candidate_name, audio_responses)
    # Return conversation history and analysis
    return "\n".join(conversation_history), text_df, text_plot, audio_df, audio_plot
# Create text inputs and audio inputs
text_inputs = [gr.Textbox(label=f"Response to Q{i+1}:") for i in range(5)]  # Up to 5 text responses
audio_inputs = [gr.Audio(type="filepath", label=f"Response to Audio Q{i+1}:") for i in range(2)]  # Up to 2 audio responses; file paths are passed to the ASR model
interface = gr.Interface(
    fn=chat_interface,
    inputs=[gr.Textbox(label="Candidate Name")] + text_inputs + audio_inputs,
    outputs=["text", "dataframe", "plot", "dataframe", "plot"],
    title="Dynamic Psychometric Analysis Chatbot"
)
# Launch the interface
interface.launch()