Upload backend2.py
backend2.py +446 -0
ADDED
@@ -0,0 +1,446 @@
import gradio as gr
import tempfile
import os
import json
from io import BytesIO
from collections import deque
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from openai import OpenAI
import time

# Imports - Keep only what's actually used. I've organized them.
from generatorgr import (
    generate_and_save_questions as generate_questions_manager,
    update_max_questions,
)
from generator import (
    PROFESSIONS_FILE,
    TYPES_FILE,
    OUTPUT_FILE,
    load_json_data,
    generate_questions,  # Keep if needed, but ensure it exists
)
from splitgpt import (
    generate_and_save_questions_from_pdf3,
    generate_questions_from_job_description,
)
from ai_config import convert_text_to_speech
from knowledge_retrieval import get_next_response, get_initial_question
from prompt_instructions import get_interview_initial_message_hr
from settings import language
from utils import save_interview_history
from tools import store_interview_report, read_questions_from_json

load_dotenv()  # Load .env variables

class InterviewState:
    """Manages the state of the interview."""

    def __init__(self):
        self.reset()

    def reset(self, voice="alloy"):
        self.question_count = 0
        # Corrected history format: List of [user_msg, bot_msg] pairs.
        self.interview_history = []
        self.selected_interviewer = voice
        self.interview_finished = False
        self.audio_enabled = True
        self.temp_audio_files = []
        self.initial_audio_path = None
        self.interview_chain = None
        self.report_chain = None
        self.current_questions = []
        self.history_limit = 5  # Limit the history (good for performance)

    def get_voice_setting(self):
        return self.selected_interviewer

interview_state = InterviewState()

def initialize_chains():
    """Initializes the LangChain LLM chains."""
    openai_api_key = os.getenv("OPENAI_API_KEY")
    if not openai_api_key:
        raise ValueError(
            "OpenAI API key not found. Set it in your .env file."
        )

    llm = ChatOpenAI(
        openai_api_key=openai_api_key, model="gpt-4", temperature=0.7, max_tokens=750
    )

    interview_prompt_template = """
You are Sarah, an empathetic HR interviewer conducting a technical interview in {language}.

Current Question: {current_question}

Previous conversation history:
{history}

User's response to current question: {user_input}

Your response:
"""
    interview_prompt = PromptTemplate(
        input_variables=["language", "current_question", "history", "user_input"],
        template=interview_prompt_template,
    )
    interview_state.interview_chain = LLMChain(prompt=interview_prompt, llm=llm)

    report_prompt_template = """
You are an HR assistant tasked with generating a concise report based on the following interview transcript in {language}:

{interview_transcript}

Summarize the candidate's performance, highlighting strengths and areas for improvement. Keep it to 3-5 sentences.
Report:
"""
    report_prompt = PromptTemplate(
        input_variables=["language", "interview_transcript"], template=report_prompt_template
    )
    interview_state.report_chain = LLMChain(prompt=report_prompt, llm=llm)
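
# Note: with LLMChain, .invoke(...) returns a dict whose "text" key (the default output
# key) holds the generated completion; that is why generate_report and bot_response
# below index the result with ["text"].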

def generate_report(report_chain, history, language):
    """Generates a concise interview report."""
    if report_chain is None:
        raise ValueError("Report chain is not initialized.")

    # Convert the Gradio-style history to a plain text transcript.
    transcript = ""
    for user_msg, bot_msg in history:
        transcript += f"User: {user_msg}\nAssistant: {bot_msg}\n"

    report = report_chain.invoke({"language": language, "interview_transcript": transcript})
    return report["text"]
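
# Illustrative usage (hedged example, not called anywhere in this file): generate_report
# expects Gradio-style history, i.e. a list of [user_msg, bot_msg] pairs, and the chains
# must already have been created via initialize_chains().
#   initialize_chains()
#   example_history = [["Hello", "Hi! Tell me about yourself."],
#                      ["I build Python APIs.", "Thanks for sharing!"]]
#   summary = generate_report(interview_state.report_chain, example_history, "English")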

def reset_interview_action(voice):
    """Resets the interview state and prepares the initial message."""
    interview_state.reset(voice)
    initialize_chains()
    print(f"[DEBUG] Interview reset. Voice: {voice}")

    initial_message_text = get_interview_initial_message_hr(5)  # Get initial message

    # Convert to speech and save to a temporary file.
    initial_audio_buffer = BytesIO()
    convert_text_to_speech(initial_message_text, initial_audio_buffer, voice)
    initial_audio_buffer.seek(0)

    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
        temp_audio_path = temp_file.name
        temp_file.write(initial_audio_buffer.getvalue())

    interview_state.temp_audio_files.append(temp_audio_path)
    print(f"[DEBUG] Audio file saved at {temp_audio_path}")

    # Return values in the correct format for Gradio.
    return (
        [[None, initial_message_text]],  # [user_msg, bot_msg]. User starts with None.
        gr.Audio(value=temp_audio_path, autoplay=True),
        gr.Textbox(interactive=True),  # Enable the textbox
    )

def start_interview():
    """Starts the interview (used by the Gradio button)."""
    return reset_interview_action(interview_state.selected_interviewer)

def construct_history_string(history):
    """Constructs a history string for the LangChain prompt."""
    history_str = ""
    for user_msg, bot_msg in history:
        history_str += f"User: {user_msg}\nAssistant: {bot_msg}\n"
    return history_str

def bot_response(chatbot, user_message_text):
    """Handles the bot's response logic."""
    voice = interview_state.get_voice_setting()
    history_str = construct_history_string(chatbot)

    if interview_state.question_count < len(interview_state.current_questions):
        current_question = interview_state.current_questions[interview_state.question_count]

        response = interview_state.interview_chain.invoke(
            {
                "language": language,
                "current_question": current_question,
                "history": history_str,
                "user_input": user_message_text,
            }
        )["text"]

        interview_state.question_count += 1

        # Text-to-speech
        audio_buffer = BytesIO()
        convert_text_to_speech(response, audio_buffer, voice)
        audio_buffer.seek(0)
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
            temp_audio_path = temp_file.name
            temp_file.write(audio_buffer.getvalue())
        interview_state.temp_audio_files.append(temp_audio_path)

        # Update chatbot history in the correct format.
        chatbot.append([user_message_text, response])  # Add user and bot messages

        return chatbot, gr.Audio(value=temp_audio_path, autoplay=True), gr.File(visible=False)

    else:  # Interview finished
        interview_state.interview_finished = True
        conclusion_message = "Thank you for your time. The interview is complete. Please review your report."

        # Text-to-speech for conclusion
        conclusion_audio_buffer = BytesIO()
        convert_text_to_speech(conclusion_message, conclusion_audio_buffer, voice)
        conclusion_audio_buffer.seek(0)
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_conclusion_file:
            temp_conclusion_audio_path = temp_conclusion_file.name
            temp_conclusion_file.write(conclusion_audio_buffer.getvalue())
        interview_state.temp_audio_files.append(temp_conclusion_audio_path)

        # Update chatbot history.
        chatbot.append([user_message_text, conclusion_message])

        # Generate and save report.
        report_content = generate_report(
            interview_state.report_chain, chatbot, language
        )  # Pass Gradio history
        txt_path = save_interview_history(
            [f"User: {user}\nAssistant: {bot}" for user, bot in chatbot], language
        )  # Create plain text history
        report_file_path = store_interview_report(report_content)
        print(f"[DEBUG] Interview report saved at: {report_file_path}")

        return (
            chatbot,
            gr.Audio(value=temp_conclusion_audio_path, autoplay=True),
            gr.File(visible=True, value=txt_path),
        )

def convert_text_to_speech_updated(text, voice="alloy"):
    """Converts text to speech and returns the file path."""
    try:
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        response = client.audio.speech.create(model="tts-1", voice=voice, input=text)

        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            for chunk in response.iter_bytes():
                tmp_file.write(chunk)
            temp_audio_path = tmp_file.name
        return temp_audio_path

    except Exception as e:
        print(f"Error in text-to-speech: {e}")
        return None

def transcribe_audio(audio_file_path):
    """Transcribes audio to text."""
    try:
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        with open(audio_file_path, "rb") as audio_file:
            transcription = client.audio.transcriptions.create(
                model="whisper-1", file=audio_file
            )
        return transcription.text
    except Exception as e:
        print(f"Error in transcription: {e}")
        return ""

def conduct_interview_updated(questions, language="English", history_limit=5):
    """Conducts the interview (LangChain/OpenAI)."""
    openai_api_key = os.getenv("OPENAI_API_KEY")
    if not openai_api_key:
        raise RuntimeError("OpenAI API key not found.")

    chat = ChatOpenAI(
        openai_api_key=openai_api_key, model="gpt-4o", temperature=0.7, max_tokens=750
    )

    conversation_history = deque(maxlen=history_limit)  # For LangChain, not Gradio
    system_prompt = (
        f"You are Sarah, an empathetic HR interviewer conducting a technical interview in {language}. "
        "Respond to user follow-up questions politely and concisely. Keep responses brief."
    )

    interview_data = []  # Store Q&A for potential later use
    current_question_index = [0]
    is_interview_finished = [False]

    initial_message = (
        "Hi there, I'm Sarah, your friendly AI HR assistant! "
        "I'll guide you through a series of interview questions. "
        "Take your time."
    )
    final_message = "That wraps up our interview. Thank you for your responses!"

    def interview_step(user_input, audio_input, history):
        nonlocal current_question_index, is_interview_finished

        if is_interview_finished[0]:
            return history, "", None  # No further interaction

        if audio_input:
            user_input = transcribe_audio(audio_input)
            if not user_input:
                history.append(["", "I couldn't understand your audio. Could you please repeat or type?"])  # Empty string "" so the user input is not None
                audio_path = convert_text_to_speech_updated(history[-1][1])  # Access the content
                return history, "", audio_path

        if user_input.lower() in ["exit", "quit"]:
            history.append(["", "The interview has ended. Thank you."])  # Empty string "" so the user input is not None
            is_interview_finished[0] = True
            return history, "", None
        # Crucial: Add USER INPUT to history *before* getting bot response.
        history.append([user_input, ""])  # Add user input, bot response pending

        question_text = questions[current_question_index[0]]
        # Prepare history for LangChain (not Gradio chatbot format)
        history_content = "\n".join(
            [
                f"Q: {entry['question']}\nA: {entry['answer']}"
                for entry in conversation_history
            ]
        )
        combined_prompt = (
            f"{system_prompt}\n\nPrevious conversation history:\n{history_content}\n\n"
            f"Current question: {question_text}\nUser's input: {user_input}\n\n"
            "Respond warmly."
        )

        messages = [
            SystemMessage(content=system_prompt),
            HumanMessage(content=combined_prompt),
        ]

        response = chat.invoke(messages)
        response_content = response.content.strip()
        audio_path = convert_text_to_speech_updated(response_content)

        conversation_history.append({"question": question_text, "answer": user_input})
        interview_data.append({"question": question_text, "answer": user_input})

        # Update Gradio-compatible history. Crucial for display.
        history[-1][1] = response_content  # Update the last entry with the bot's response

        if current_question_index[0] + 1 < len(questions):
            current_question_index[0] += 1
            next_question = f"Next question: {questions[current_question_index[0]]}"
            next_question_audio_path = convert_text_to_speech_updated(next_question)
            # No need to add the "Next question:" prompt to the displayed history.
            # The bot will say it. Adding it here would cause a double entry.
            return history, "", next_question_audio_path
        else:
            final_message_audio = convert_text_to_speech_updated(final_message)
            history.append([None, final_message])  # Final message, no user input.
            is_interview_finished[0] = True
            return history, "", final_message_audio

    return interview_step, initial_message, final_message
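
# Illustrative sketch (an assumption, not wired into the app): the closure returned by
# conduct_interview_updated can be driven directly, e.g. for a quick manual smoke test.
# It needs OPENAI_API_KEY to be set; nothing in this module calls it automatically.
def _smoke_test_interview_step():
    step, greeting, _ = conduct_interview_updated(["Tell me about yourself."])
    history, _, audio_path = step("I have five years of Python experience.", None, [])
    print(greeting)
    print(history)     # Gradio-style [[user, bot], ...] pairs, ending with the closing message
    print(audio_path)  # path to the MP3 of the bot's spoken reply, or None if TTS failed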

def launch_candidate_app_updated():
    """Launches the Gradio app for candidates."""
    QUESTIONS_FILE_PATH = "questions.json"

    try:
        questions = read_questions_from_json(QUESTIONS_FILE_PATH)
        if not questions:
            raise ValueError("No questions found.")
    except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
        print(f"Error loading questions: {e}")
        with gr.Blocks() as error_app:
            gr.Markdown(f"# Error: {e}")
        return error_app
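
    # Assumed questions.json shape (illustrative only): a flat JSON array of question
    # strings, e.g. ["Tell me about yourself.", "Describe a challenging project you led."];
    # read_questions_from_json is expected to return that list.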
    interview_func, initial_message, _ = conduct_interview_updated(questions)

    def start_interview_ui():
        """Starts the interview."""
        history = []
        initial_combined = (
            initial_message + " Let's begin! Here's the first question: " + questions[0]
        )
        initial_audio_path = convert_text_to_speech_updated(initial_combined)
        history.append(["", initial_combined])  # Correct format: [user, bot]. Empty string for user.
        return history, "", initial_audio_path, gr.Textbox(interactive=True)  # Return interactive textbox

    def clear_interview_ui():
        """Clears the interview and resets."""
        # Recreate the interview closure so its question index and history start fresh.
        nonlocal interview_func, initial_message
        interview_func, initial_message, _ = conduct_interview_updated(questions)
        return [], "", None, gr.Textbox(interactive=True)  # Return interactive textbox

    def interview_step_wrapper(user_response, audio_response, history):
        """Wrapper around the interview step function."""
        history, user_text, audio_path = interview_func(user_response, audio_response, history)
        # Keep the textbox interactive; once the interview is finished the step function stops responding.
        return history, "", audio_path, gr.Textbox(value=user_text if user_text is not None else "", interactive=True)

    with gr.Blocks(title="AI HR Interview Assistant") as candidate_app:
        gr.Markdown(
            "<h1 style='text-align: center;'>Welcome to Your AI HR Interview Assistant</h1>"
        )
        start_btn = gr.Button("Start Interview", variant="primary")
        chatbot = gr.Chatbot(label="Interview Chat", height=650)
        audio_input = gr.Audio(
            sources=["microphone"], type="filepath", label="Record Your Answer"
        )
        user_input = gr.Textbox(
            label="Your Response",
            placeholder="Type your answer here or use the microphone...",
            lines=1,
            interactive=True,  # Make the textbox interactive initially
        )
        audio_output = gr.Audio(label="Response Audio", autoplay=True)

        with gr.Row():
            submit_btn = gr.Button("Submit", variant="primary")
            clear_btn = gr.Button("Clear Chat")

        def on_enter_submit(history, user_response):
            """Handles submission when Enter is pressed."""
            if not user_response.strip():
                return history, "", None, gr.Textbox(interactive=True)  # Prevent empty submissions, keep interactive
            history, _, audio_path, new_textbox = interview_step_wrapper(
                user_response, None, history
            )  # No audio on Enter
            return history, "", audio_path, new_textbox

        start_btn.click(
            start_interview_ui, inputs=[], outputs=[chatbot, user_input, audio_output, user_input]  # Include user_input as output
        )
        audio_input.stop_recording(
            interview_step_wrapper,
            inputs=[user_input, audio_input, chatbot],
            outputs=[chatbot, user_input, audio_output, user_input],  # Include user_input as output
        )
        submit_btn.click(
            interview_step_wrapper,
            inputs=[user_input, audio_input, chatbot],
            outputs=[chatbot, user_input, audio_output, user_input],  # Include user_input
        )
        user_input.submit(
            on_enter_submit,
            inputs=[chatbot, user_input],
            outputs=[chatbot, user_input, audio_output, user_input],  # Include user_input
        )
        clear_btn.click(
            clear_interview_ui, inputs=[], outputs=[chatbot, user_input, audio_output, user_input]  # Include user_input
        )

    return candidate_app
# --- (End of Candidate Interview Implementation) ---


def cleanup():
    """Cleans up temporary audio files."""
    for audio_file in interview_state.temp_audio_files:
        try:
            if os.path.exists(audio_file):
                os.unlink(audio_file)
        except Exception as e:
            print(f"Error deleting file {audio_file}: {e}")
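

# Hedged sketch (assumption; the uploaded backend2.py ends with cleanup() above):
# one plausible way to serve the candidate app when this module is run directly.
if __name__ == "__main__":
    import atexit

    atexit.register(cleanup)  # delete temporary MP3 files when the process exits
    launch_candidate_app_updated().launch()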