ruslanmv committed
Commit 3717960 · verified · 1 Parent(s): 5798cfc

Update app.py

Files changed (1)
  1. app.py +473 -401
app.py CHANGED
@@ -1,463 +1,412 @@
- import gradio as gr
- import tempfile
  import os
  import json
- from io import BytesIO
- import subprocess
  from collections import deque
  from dotenv import load_dotenv
  from langchain_openai import ChatOpenAI
- from langchain.schema import HumanMessage, SystemMessage
-
- # Imports from other modules
- from generatorgr import (
-     generate_and_save_questions as generate_questions_manager,
-     update_max_questions,
- )
- from generator import (
-     PROFESSIONS_FILE,
-     TYPES_FILE,
-     OUTPUT_FILE,
-     load_json_data,
-     generate_questions,
- )
- from splitgpt import (
-     generate_and_save_questions_from_pdf3
- )
-
- # Placeholder imports for the manager application
- # Ensure these modules and functions are correctly implemented in their respective files
- from ai_config import convert_text_to_speech, load_model  # Placeholder, needs implementation
- from knowledge_retrieval import (
-     setup_knowledge_retrieval,
-     get_next_response,
-     generate_report,
-     get_initial_question,
- )  # Placeholder, needs implementation
- from prompt_instructions import (
-     get_interview_initial_message_hr,
-     get_default_hr_questions,
- )  # Placeholder, needs implementation
- from settings import language  # Placeholder, needs implementation
- from utils import save_interview_history  # Placeholder, needs implementation
-
-
- class InterviewState:
-     def __init__(self):
-         self.reset()
-
-     def reset(self, voice="alloy"):
-         self.question_count = 0
-         self.interview_history = []
-         self.selected_interviewer = voice
-         self.interview_finished = False
-         self.audio_enabled = True
-         self.temp_audio_files = []
-         self.initial_audio_path = None
-         self.admin_authenticated = False
-         self.document_loaded = False
-         self.knowledge_retrieval_setup = False
-         self.interview_chain = None
-         self.report_chain = None
-         self.current_questions = []  # Store the current set of questions
-
-     def get_voice_setting(self):
-         return self.selected_interviewer
-
-
- interview_state = InterviewState()
-
-
- def reset_interview_action(voice):
-     interview_state.reset(voice)
-     n_of_questions = 5  # Default questions
-     print(f"[DEBUG] Interview reset. Voice: {voice}")
-
-     initial_message = {
-         "role": "assistant",
-         "content": get_interview_initial_message_hr(n_of_questions),
-     }
-     print(f"[DEBUG] Interview reset. Voice: {voice}")
-     # Convert the initial message to speech
-     initial_audio_buffer = BytesIO()
-     convert_text_to_speech(initial_message["content"], initial_audio_buffer, voice)
-     initial_audio_buffer.seek(0)
-
-     with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
-         temp_audio_path = temp_file.name
-         temp_file.write(initial_audio_buffer.getvalue())
-
-     interview_state.temp_audio_files.append(temp_audio_path)
-     print(f"[DEBUG] Audio file saved at {temp_audio_path}")
-
-     return (
-         [initial_message],
-         gr.Audio(value=temp_audio_path, autoplay=True),
-         gr.Textbox(interactive=True),
-     )
-
-
- def start_interview():
-
-     return reset_interview_action(interview_state.selected_interviewer)


- import os
- from datetime import datetime
-
- def store_interview_report(report_content, folder_path="reports"):
-     """
-     Stores the interview report in a specified reports folder.
-
-     Args:
-         report_content (str): The content of the report to store.
-         folder_path (str): The directory where the report will be saved.
-
-     Returns:
-         str: The file path of the saved report.
-     """
-     os.makedirs(folder_path, exist_ok=True)
-     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-     file_path = os.path.join(folder_path, f"interview_report_{timestamp}.txt")
-
-     try:
-         with open(file_path, "w", encoding="utf-8") as file:
-             file.write(report_content)
-         print(f"[DEBUG] Interview report saved at {file_path}")
-         return file_path
-     except Exception as e:
-         print(f"[ERROR] Failed to save interview report: {e}")
-         return None
-
-
- def bot_response(chatbot, message):
-     n_of_questions = 5  # Default value
-     interview_state.question_count += 1
-     voice = interview_state.get_voice_setting()
-
-     if interview_state.question_count == 1:
-         response = get_initial_question(interview_state.interview_chain)
-     else:
-         response = get_next_response(
-             interview_state.interview_chain,
-             message["content"],
-             [msg["content"] for msg in chatbot if msg.get("role") == "user"],
-             interview_state.question_count,
-         )
-
-     # Generate and save the bot's audio response
-     audio_buffer = BytesIO()
-     convert_text_to_speech(response, audio_buffer, voice)
-     audio_buffer.seek(0)
-     with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
-         temp_audio_path = temp_file.name
-         temp_file.write(audio_buffer.getvalue())
-
-     interview_state.temp_audio_files.append(temp_audio_path)
-     chatbot.append({"role": "assistant", "content": response})
-
-     # Check if the interview is finished
-     if interview_state.question_count >= n_of_questions:
-         interview_state.interview_finished = True
-         conclusion_message = (
-             "Thank you for your time. The interview is complete. Please review your report."
-         )
-
-         # Generate conclusion audio message
-         conclusion_audio_buffer = BytesIO()
-         convert_text_to_speech(conclusion_message, conclusion_audio_buffer, voice)
-         conclusion_audio_buffer.seek(0)
-         with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_conclusion_file:
-             temp_conclusion_audio_path = temp_conclusion_file.name
-             temp_conclusion_file.write(conclusion_audio_buffer.getvalue())
-         interview_state.temp_audio_files.append(temp_conclusion_audio_path)
-
-         # Append conclusion message to chatbot history
-         chatbot.append({"role": "system", "content": conclusion_message})
-
-         # Generate the HR report content
-         report_content = generate_report(
-             interview_state.report_chain,
-             [msg["content"] for msg in chatbot],
-             language,
-         )
-
-         # Save the interview history
-         txt_path = save_interview_history(
-             [msg["content"] for msg in chatbot], language
-         )
-         print(f"[DEBUG] Interview history saved at: {txt_path}")
-
-         # Save the report to the reports folder
-         report_file_path = store_interview_report(report_content)
-         print(f"[DEBUG] Interview report saved at: {report_file_path}")
-
-         return chatbot, gr.File(visible=True, value=txt_path), gr.Audio(value=temp_conclusion_audio_path, autoplay=True)
-
-     return chatbot, gr.Audio(value=temp_audio_path, autoplay=True)
-
-
- # --- Candidate Interview Implementation ---
  load_dotenv()

  # Function to read questions from JSON
  def read_questions_from_json(file_path):
      if not os.path.exists(file_path):
          raise FileNotFoundError(f"The file '{file_path}' does not exist.")
-
-     with open(file_path, 'r') as f:
          questions_list = json.load(f)
-
      if not questions_list:
          raise ValueError("The JSON file is empty or has invalid content.")
-
      return questions_list

- # Conduct interview and handle user input

- import os
- import json
- from io import BytesIO
- import tempfile
- from collections import deque
- from langchain_openai import ChatOpenAI
- from langchain.schema import HumanMessage, SystemMessage

- # Placeholder imports (ensure these are correctly implemented)
- from ai_config import convert_text_to_speech  # For text-to-speech
- from knowledge_retrieval import generate_report  # For report generation
- from utils import save_interview_history  # For saving interview history
- from settings import language  # Placeholder, needs implementation

- # Assuming you have interview_state defined elsewhere and accessible here
- # interview_state = InterviewState()  # You might need to initialize this or pass it as a parameter

  def conduct_interview(questions, language="English", history_limit=5):
      openai_api_key = os.getenv("OPENAI_API_KEY")
      if not openai_api_key:
-         raise RuntimeError(
-             "OpenAI API key not found. Please add it to your .env file as OPENAI_API_KEY."
-         )

      chat = ChatOpenAI(
-         openai_api_key=openai_api_key, model="gpt-4", temperature=0.7, max_tokens=750
      )

      conversation_history = deque(maxlen=history_limit)
      system_prompt = (
          f"You are Sarah, an empathetic HR interviewer conducting a technical interview in {language}. "
-         "Respond to user follow-up questions politely and concisely. If the user is confused, provide clear clarification."
      )

-     interview_data = []
-     current_question_index = [0]

      initial_message = (
          "👋 Hi there, I'm Sarah, your friendly AI HR assistant! "
          "I'll guide you through a series of interview questions to learn more about you. "
          "Take your time and answer each question thoughtfully."
      )

-     def interview_step(user_input, history):

-         if user_input.lower() in ["exit", "quit"]:
-             history.append(
-                 {
-                     "role": "assistant",
-                     "content": "The interview has ended at your request. Thank you for your time!",
-                 }
-             )
-             return history, ""

-         question_text = questions[current_question_index[0]]
-         history_content = "\n".join(
-             [
-                 f"Q: {entry['question']}\nA: {entry['answer']}"
-                 for entry in conversation_history
-             ]
-         )
-         combined_prompt = (
-             f"{system_prompt}\n\nPrevious conversation history:\n{history_content}\n\n"
-             f"Current question: {question_text}\nUser's input: {user_input}\n\n"
-             "Respond in a warm and conversational way, offering natural follow-ups if needed."
-         )

-         messages = [
-             SystemMessage(content=system_prompt),
-             HumanMessage(content=combined_prompt),
-         ]

-         response = chat.invoke(messages)
-         response_content = response.content.strip()

-         # --- Integrated bot_response functionality starts here ---

-         interview_state.question_count += 1
-         voice = interview_state.get_voice_setting()  # Get voice setting

-         # Generate and save the bot's audio response
-         audio_buffer = BytesIO()
-         convert_text_to_speech(response_content, audio_buffer, voice)
-         audio_buffer.seek(0)
-         with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
-             temp_audio_path = temp_file.name
-             temp_file.write(audio_buffer.getvalue())

-         interview_state.temp_audio_files.append(temp_audio_path)

-         # --- Integrated bot_response functionality ends here ---

-         conversation_history.append({"question": question_text, "answer": user_input})
-         interview_data.append({"question": question_text, "answer": user_input})
-         history.append({"role": "user", "content": user_input})
-         history.append({"role": "assistant", "content": response_content, "audio": temp_audio_path})  # Store audio path

-         if current_question_index[0] + 1 < len(questions):
-             current_question_index[0] += 1
-             next_question = f"Alright, let's move on. {questions[current_question_index[0]]}"
-             history.append({"role": "assistant", "content": next_question})

          else:
-             conclusion_message = "That wraps up our interview. Thank you so much for your responses—it's been great learning more about you!"
-             history.append(
-                 {"role": "assistant", "content": conclusion_message}
-             )

-             # --- Generate report and save history (only at the end) ---
-             interview_state.interview_finished = True

-             # Generate the HR report content
-             report_content = generate_report(
-                 interview_state.report_chain,
-                 [msg["content"] for msg in history if msg["role"] != "system"],  # Consider only user/assistant messages
-                 language,
-             )

-             # Save the interview history
-             txt_path = save_interview_history(
-                 [msg["content"] for msg in history if msg["role"] != "system"], language  # Consider only user/assistant messages
-             )
-             print(f"[DEBUG] Interview history saved at: {txt_path}")

-             # Save the report to the reports folder
-             report_file_path = store_interview_report(report_content)
-             print(f"[DEBUG] Interview report saved at: {report_file_path}")

-         return history, ""

-     return interview_step, initial_message



- def launch_candidate_app():
      QUESTIONS_FILE_PATH = "questions.json"

-     def start_interview_ui():
-         # Reload questions every time the interview starts
-         interview_state.current_questions = read_questions_from_json(QUESTIONS_FILE_PATH)
-         interview_func, initial_message = conduct_interview(interview_state.current_questions)
-         interview_state.interview_func = interview_func
-
-         history = [{"role": "assistant", "content": initial_message}]
-         history.append({"role": "assistant", "content": "Let's begin! Here's your first question: " + interview_state.current_questions[0]})
-         return history, ""
-
-     def clear_interview_ui():
-         # Reset state when clearing the interview
-         interview_state.reset()
-         return [], ""
-
-     def on_enter_submit_ui(history, user_response):
-         if not user_response.strip():
-             return history, ""
-         history, _ = interview_state.interview_func(user_response, history)
-         return history, ""
-
-     with gr.Blocks(title="AI HR Interview Assistant") as candidate_app:
-         gr.Markdown("<h1 style='text-align: center;'>👋 Welcome to Your AI HR Interview Assistant</h1>")
-         start_btn = gr.Button("Start Interview", variant="primary")
-         chatbot = gr.Chatbot(label="Interview Chat", height=650, type="messages")
-         user_input = gr.Textbox(label="Your Response", placeholder="Type your answer here...", lines=1)
-         with gr.Row():
-             submit_btn = gr.Button("Submit")
-             clear_btn = gr.Button("Clear Chat")

-         start_btn.click(start_interview_ui, inputs=[], outputs=[chatbot, user_input])
-         submit_btn.click(on_enter_submit_ui, inputs=[chatbot, user_input], outputs=[chatbot, user_input])
-         user_input.submit(on_enter_submit_ui, inputs=[chatbot, user_input], outputs=[chatbot, user_input])
-         clear_btn.click(clear_interview_ui, inputs=[], outputs=[chatbot, user_input])
-
-     return candidate_app
-
-
- def create_manager_app():
-     with gr.Blocks(
-         title="AI HR Interviewer Manager",
-         css="""
-         .tab-button {
-             background-color: #f0f0f0;
-             color: #333;
-             padding: 10px 20px;
-             border: none;
-             cursor: pointer;
-             font-size: 16px;
-             transition: background-color 0.3s ease;
-         }
-         .tab-button:hover {
-             background-color: #d0d0d0;
-         }
-         .tab-button.selected {
-             background-color: #666;
-             color: white;
-         }
-         """,
-     ) as manager_app:
-         gr.HTML(
-             """
-             <div style='text-align: center; margin-bottom: 20px;'>
-                 <h1 style='font-size: 36px; color: #333;'>AI HR Interviewer Manager</h1>
-                 <p style='font-size: 18px; color: #666;'>Select your role to start the interview process.</p>
-             </div>
-             """
          )

          with gr.Row():
-             user_role = gr.Dropdown(
-                 choices=["Admin", "Candidate"],
-                 label="Select User Role",
-                 value="Candidate",
-             )
-             proceed_button = gr.Button("👉 Proceed")

-         candidate_ui = gr.Column(visible=False)
-         admin_ui = gr.Column(visible=False)

-         with candidate_ui:
-             gr.Markdown("## 🚀 Candidate Interview")
-             candidate_app = launch_candidate_app()

-         with admin_ui:
-             gr.Markdown("## 🔒 Admin Panel")
              with gr.Tab("Generate Questions"):
                  try:
                      professions_data = load_json_data(PROFESSIONS_FILE)
-                     types_data = load_json_data(TYPES_FILE)
                  except (FileNotFoundError, json.JSONDecodeError) as e:
                      print(f"Error loading data from JSON files: {e}")
                      professions_data = []
-                     types_data = []

                  profession_names = [
                      item["profession"] for item in professions_data
-                 ]
-                 interview_types = [item["type"] for item in types_data]

                  with gr.Row():
                      profession_input = gr.Dropdown(
-                         label="Select Profession", choices=profession_names
                      )
                      interview_type_input = gr.Dropdown(
-                         label="Select Interview Type", choices=interview_types
                      )

                  num_questions_input = gr.Number(
@@ -470,12 +419,14 @@ def create_manager_app():
                  overwrite_input = gr.Checkbox(
                      label="Overwrite all_questions.json?", value=True
                  )
                  # Update num_questions_input when interview_type_input changes
                  interview_type_input.change(
                      fn=update_max_questions,
                      inputs=interview_type_input,
                      outputs=num_questions_input,
                  )
                  generate_button = gr.Button("Generate Questions")

                  output_text = gr.Textbox(label="Output")
@@ -496,16 +447,35 @@ def create_manager_app():
              with gr.Tab("Generate from PDF"):
                  gr.Markdown("### 📄 Upload PDF for Question Generation")
                  pdf_file_input = gr.File(label="Upload PDF File", type="filepath")
-                 num_questions_pdf_input = gr.Number(label="Number of Questions", value=5, precision=0)
-
                  pdf_status_output = gr.Textbox(label="Status", lines=3)
                  pdf_question_output = gr.JSON(label="Generated Questions")
-
                  generate_pdf_button = gr.Button("Generate Questions from PDF")

                  def update_pdf_ui(pdf_path, num_questions):
                      for status, questions in generate_and_save_questions_from_pdf3(pdf_path, num_questions):
-                         yield gr.update(value=status), gr.update(value=questions)

                  generate_pdf_button.click(
                      update_pdf_ui,
@@ -513,39 +483,141 @@ def create_manager_app():
                      outputs=[pdf_status_output, pdf_question_output],
                  )




-         def show_selected_ui(role):
-             if role == "Candidate":
-                 return {candidate_ui: gr.Column(visible=True), admin_ui: gr.Column(visible=False)}

-             elif role == "Admin":
-                 return {candidate_ui: gr.Column(visible=False), admin_ui: gr.Column(visible=True)}
              else:
-                 return {candidate_ui: gr.Column(visible=False), admin_ui: gr.Column(visible=False)}


-         proceed_button.click(
-             show_selected_ui,
-             inputs=[user_role],
-             outputs=[candidate_ui, admin_ui],
          )

-     return manager_app

- def cleanup():
-     for audio_file in interview_state.temp_audio_files:
-         try:
-             if os.path.exists(audio_file):
-                 os.unlink(audio_file)
-         except Exception as e:
-             print(f"Error deleting file {audio_file}: {e}")


  if __name__ == "__main__":
-     manager_app = create_manager_app()
-     try:
-         manager_app.launch(server_name="0.0.0.0", server_port=7860, debug=True)
-     finally:
-         cleanup()
+
  import os
  import json
+ import time
+ import tempfile
  from collections import deque
+
+ import gradio as gr
  from dotenv import load_dotenv
  from langchain_openai import ChatOpenAI
+ from langchain.schema import HumanMessage, SystemMessage, AIMessage  # Import AIMessage
+ from openai import OpenAI
+ from datetime import datetime  # Import datetime for timestamp


+ # Load environment variables
  load_dotenv()

  # Function to read questions from JSON
  def read_questions_from_json(file_path):
      if not os.path.exists(file_path):
          raise FileNotFoundError(f"The file '{file_path}' does not exist.")
+     with open(file_path, 'r', encoding='utf-8') as f:
          questions_list = json.load(f)
      if not questions_list:
          raise ValueError("The JSON file is empty or has invalid content.")
      return questions_list

+ # Function to save interview history to JSON
+ def save_interview_history(history, filename="interview_history.json"):
+     """Saves the interview history to a JSON file."""
+     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+     filepath = f"{timestamp}_{filename}"
+     try:
+         with open(filepath, 'w', encoding='utf-8') as f:
+             json.dump(history, f, ensure_ascii=False, indent=4)
+         print(f"Interview history saved to: {filepath}")
+     except Exception as e:
+         print(f"Error saving interview history: {e}")


+ # Function to convert text to speech (OpenAI's TTS usage, adjust if needed)
+ def convert_text_to_speech(text):
+     start_time = time.time()
+     try:
+         client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+         response = client.audio.speech.create(model="tts-1", voice="alloy", input=text)
+
+         with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
+             for chunk in response.iter_bytes():
+                 tmp_file.write(chunk)
+             temp_audio_path = tmp_file.name
+
+         print(f"DEBUG - Text-to-speech conversion time: {time.time() - start_time:.2f} seconds")
+         return temp_audio_path
+     except Exception as e:
+         print(f"Error during text-to-speech conversion: {e}")
+         return None
+
+
+ # Function to transcribe audio (OpenAI Whisper usage, adjust if needed)
+ def transcribe_audio(audio_file_path):
+     start_time = time.time()
+     try:
+         client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+         with open(audio_file_path, "rb") as audio_file:
+             transcription = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
+         print(f"DEBUG - Audio transcription time: {time.time() - start_time:.2f} seconds")
+         return transcription.text
+     except Exception as e:
+         print(f"Error during audio transcription: {e}")
+         return None


  def conduct_interview(questions, language="English", history_limit=5):
+     """
+     Sets up a function (interview_step) that handles each round of Q&A.
+     Returns (interview_step, initial_message, final_message).
+     """
+     start_time = time.time()
      openai_api_key = os.getenv("OPENAI_API_KEY")
      if not openai_api_key:
+         raise RuntimeError("OpenAI API key not found. Please add it to your .env or set it in env variables.")

+     # LangChain-based ChatOpenAI
      chat = ChatOpenAI(
+         openai_api_key=openai_api_key,
+         model="gpt-4o",  # or "gpt-3.5-turbo", etc.
+         temperature=0.7,
+         max_tokens=750
      )

      conversation_history = deque(maxlen=history_limit)
      system_prompt = (
          f"You are Sarah, an empathetic HR interviewer conducting a technical interview in {language}. "
+         "You respond politely, concisely, and provide clarifications if needed. "
+         "Ask only ONE question at a time. Wait for the user to respond before asking the next question. "
+         "Provide a very brief, positive acknowledgement of the user's response, *then* ask the next question. "
+         "Limit follow-up questions to a maximum of ONE per main interview question to keep the interview concise."  # Added instruction for single follow-up
+         "If the user provides strange answers, give maximum one feedback and continue with the next question. Do not ask more follow up questions if the answer is strange."
+         "After the last interview question is answered by the user, ask 'Do you have any questions for me?'. "
+         "If the user asks questions, answer them concisely and politely. After answering user questions, or if the user says they have no questions, deliver the final message: '{final_message_placeholder}'. "
+         "Keep track of the interview stage and manage the conversation flow accordingly."
      )

+     current_question_index = [0]  # Store index in a list so it's mutable in nested func
+     is_interview_finished = [False]  # Use a list for mutability
+     interview_transcript = []  # List to store full interview history for saving
+     follow_up_count = [0]  # Counter for follow-up questions within the current main question
+     interview_stage = ["questioning"]  # "questioning", "user_questions_prompt", "answering_user_questions", "final_message_stage", "finished"
+     user_questions_asked = [False]  # Flag to track if "Do you have any questions?" has been asked

      initial_message = (
          "👋 Hi there, I'm Sarah, your friendly AI HR assistant! "
          "I'll guide you through a series of interview questions to learn more about you. "
          "Take your time and answer each question thoughtfully."
      )
+     final_message_content = (
+         "That wraps up our interview. Thank you for your responses—it's been great learning more about you!"
+         " I will share the feedback with HR Team, and they will reach out to you soon."  # added line
+     )

+     updated_system_prompt = system_prompt.replace("{final_message_placeholder}", final_message_content)

+     print(f"DEBUG - conduct_interview setup time: {time.time() - start_time:.2f} seconds")
+
+     def interview_step(user_input, audio_input, history):
+         """
+         Called each time the user clicks submit or finishes audio recording.
+         `history` is a list of { 'role': '...', 'content': '...' } messages.
+         We must return an updated version of that list in the same format.
+         """
+         nonlocal current_question_index, is_interview_finished, interview_transcript, follow_up_count, interview_stage, user_questions_asked
+
+         step_start_time = time.time()
+
+         # If there's audio, transcribe it.
+         if audio_input:
+             transcript = transcribe_audio(audio_input)
+             user_input = transcript if transcript else user_input  # Use transcribed text if available
+
+         # If user typed "exit" or "quit"
+         if user_input.strip().lower() in ["exit", "quit"]:
+             history.append({
+                 "role": "assistant",
+                 "content": "The interview has ended at your request. Thank you for your time!"
+             })
+             is_interview_finished[0] = True
+             save_interview_history(interview_transcript)  # Save history before exit
+             return history, "", None
+
+         # If the interview is already finished, do nothing.
+         if is_interview_finished[0]:
+             return history, "", None
+
+         # Add user's input to history
+         history.append({"role": "user", "content": user_input})
+         interview_transcript.append({"role": "user", "content": user_input})  # Add to transcript

+         # This is a new user response, add to the short history
+         conversation_history.append({
+             "question": questions[current_question_index[0]] if current_question_index[0] < len(questions) and interview_stage[0] == "questioning" else ("User Question" if interview_stage[0] == "answering_user_questions" else "End of interview"),  # to handle index out of bound during final step
+             "answer": user_input
+         })

+         # Build the prompt
+         short_history = "\n".join([
+             f"Q: {entry['question']}\nA: {entry['answer']}"
+             for entry in conversation_history
+         ])


+         messages = []

+         if interview_stage[0] == "questioning":
+             # Normal question flow
+             combined_prompt = (
+                 f"{updated_system_prompt}\n\nPrevious Q&A:\n{short_history}\n\n"
+                 f"User's input: {user_input}\n\n"
+                 "Acknowledge the user's answer briefly, then ask the *next* question, unless this was the last question."
+             )
+             messages = [
+                 SystemMessage(content=updated_system_prompt),
+                 HumanMessage(content=combined_prompt),
+             ]

+         elif interview_stage[0] == "user_questions_prompt" or interview_stage[0] == "answering_user_questions":
+             # Handling user questions phase
+             combined_prompt = (
+                 f"{updated_system_prompt}\n\nPrevious Q&A:\n{short_history}\n\n"
+                 f"User's input (User Question): {user_input}\n\n"
+                 "Answer the user's question concisely and politely. If the user says they have no questions or similar, then deliver the final message."
+             )
+             messages = [
+                 SystemMessage(content=updated_system_prompt),
+                 HumanMessage(content=combined_prompt),
+             ]
+         elif interview_stage[0] == "final_message_stage":
+             # Should not reach here as final message is sent directly and stage becomes "finished"
+             pass
+         elif interview_stage[0] == "finished":
+             return history, "", None  # Interview is finished


+         if messages:  # Proceed only if messages are prepared (not in final_message_stage or finished)
+             # Ask ChatOpenAI
+             response = chat.invoke(messages)
+             response_content = response.content.strip()

+             history.append({"role": "assistant", "content": response_content})
+             interview_transcript.append({"role": "assistant", "content": response_content})  # Add to transcript

+             # Convert the LLM's answer to speech
+             audio_file_path = convert_text_to_speech(response_content)
          else:
+             audio_file_path = None


+         if interview_stage[0] == "questioning":
+             # Advance to the next question or handle end of questions

+             follow_up_count[0] = 0  # Reset follow-up counter for the next main question
+             if current_question_index[0] < len(questions) - 1:  # Check against len(questions) - 1
+                 current_question_index[0] += 1
+                 print(f"DEBUG - question index {current_question_index[0]}")
+                 print("DEBUG - Moving to next main question.")
+                 print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
+                 return history, "", audio_file_path  # Return current audio
+             else:
+                 # Last question answered, ask "Do you have any questions?"
+                 if not user_questions_asked[0]:
+                     user_questions_prompt_message = "Thank you for your answer. Do you have any questions for me?"
+                     user_questions_audio_path = convert_text_to_speech(user_questions_prompt_message)
+                     history.append({"role": "assistant", "content": user_questions_prompt_message})
+                     interview_transcript.append({"role": "assistant", "content": user_questions_prompt_message})
+                     interview_stage[0] = "user_questions_prompt"
+                     user_questions_asked[0] = True  # Ensure this prompt is only asked once
+                     print("DEBUG - Asked 'Do you have any questions?'")
+                     print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
+                     return history, "", user_questions_audio_path
+                 else:
+                     # This should not be reached in normal flow for last question, but as a fallback.
+                     pass  # Fallthrough to handle user questions or finalize below
+
+         if interview_stage[0] == "user_questions_prompt":
+             # Check if user has questions or says no questions
+             if user_input.strip().lower() in ["no", "no questions", "none", "nothing", "that's all", "no, thank you"]:
+                 final_audio_path = convert_text_to_speech(final_message_content)
+                 history.append({"role": "assistant", "content": final_message_content})
+                 interview_transcript.append({"role": "assistant", "content": final_message_content})
+                 interview_stage[0] = "finished"
+                 is_interview_finished[0] = True
+                 save_interview_history(interview_transcript)  # Save history at the end
+                 print("DEBUG - Interview finished after user said no questions.")
+                 print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
+                 return history, "", final_audio_path
+             else:
+                 # User asked a question, move to answering stage
+                 interview_stage[0] = "answering_user_questions"
+                 print("DEBUG - User asked a question, moving to answering stage.")
+                 print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
+                 return history, "", audio_file_path  # Respond with the AI's answer to user's question in the 'messages' processing block
+
+         elif interview_stage[0] == "answering_user_questions":
+             # After answering user question, go back to user_questions_prompt to allow more questions or finalize
+             interview_stage[0] = "user_questions_prompt"
+             print("DEBUG - Answered user question, back to user_questions_prompt.")
+             print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
+             return history, "", audio_file_path  # Already responded in 'messages' block
+
+         elif interview_stage[0] == "final_message_stage":  # Redundant stage, final message sent directly when no more questions
+             pass  # Should not reach here

+         elif interview_stage[0] == "finished":
+             return history, "", None  # Interview already finished

+         print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
+         return history, "", audio_file_path


+     # Return the step function plus initial/final text
+     return interview_step, initial_message, final_message_content


+ def main():
      QUESTIONS_FILE_PATH = "questions.json"
+     try:
+         questions = read_questions_from_json(QUESTIONS_FILE_PATH)
+         num_questions = len(questions)  # Count the number of questions
+         print(f"Loaded {num_questions} questions from {QUESTIONS_FILE_PATH}")  # Inform user about question count
+     except Exception as e:
+         print(f"Error reading questions: {e}")
+         return

+     try:
+         interview_func, initial_message, final_message = conduct_interview(questions)
+     except Exception as e:
+         print(f"Error setting up interview: {e}")
+         return
+
+     css = """
+     .contain { display: flex; flex-direction: column; }
+     .gradio-container { height: 100vh !important; overflow-y: auto; }
+     #component-0 { height: 100%; }
+     .chatbot { flex-grow: 1; overflow: auto; height: 650px; }
+     .user > div > .message { background-color: #dcf8c6 !important }
+     .bot > div > .message { background-color: #f7f7f8 !important }
+     """

+     initial_api_key_status_message = "API Key Status: Checking..."
+
+     # Build Gradio interface
+     with gr.Blocks(css=css) as demo:
+         gr.Markdown(
+             "<h1 style='text-align:center;'>👋 AI HR Interview Assistant</h1>"
+         )
+         gr.Markdown(
+             "I will ask you a series of questions. Please answer honestly and thoughtfully. "
+             "When you are ready, click **Start Interview** to begin."
+         )
+
+         start_btn = gr.Button("Start Interview", variant="primary")
+         chatbot = gr.Chatbot(
+             label="Interview Chat",
+             height=650,
+             type='messages'  # must return a list of dicts: {"role":..., "content":...}
+         )
+         audio_input = gr.Audio(
+             sources=["microphone"],
+             type="filepath",
+             label="Record Your Answer"
          )
+         user_input = gr.Textbox(
+             label="Your Response",
+             placeholder="Type your answer here or use the microphone...",
+             lines=1,
+         )
+         audio_output = gr.Audio(label="Response Audio", autoplay=True)

          with gr.Row():
+             submit_btn = gr.Button("Submit", variant="primary")
+             clear_btn = gr.Button("Clear Chat")

+         # Admin Panel Tab
+         with gr.Tab("Admin Panel", id="admin_tab"):
+             with gr.Tab("API Key Settings"):
+                 gr.Markdown("### OpenAI API Key Configuration")
+                 api_key_input = gr.Textbox(label="Enter your OpenAI API Key", type="password", placeholder="••••••••••••••••••••••••••••••••")
+                 api_key_status_output = gr.Textbox(label="API Key Status", value=initial_api_key_status_message, interactive=False)
+                 update_api_key_button = gr.Button("Update API Key")
+                 gr.Markdown("*This application does not store your API key. It is used only for this session and is not persisted when you close the app.*")
+
+                 def update_api_key(api_key):
+                     os.environ["OPENAI_API_KEY"] = api_key  # Caution: Modifying os.environ is session-based
+                     global interview_func, initial_message, final_message  # Declare globals to update them
+                     try:
+                         interview_func, initial_message, final_message = conduct_interview(questions)  # Re-init interview function
+                         return "✅ API Key Updated and Loaded."
+                     except RuntimeError as e:
+                         return f"❌ API Key Update Failed: {e}"
+
+
+                 update_api_key_button.click(
+                     update_api_key,
+                     inputs=[api_key_input],
+                     outputs=[api_key_status_output],
+                 )


+             # with gr.Tab("Generate Questions"):
              with gr.Tab("Generate Questions"):
                  try:
+                     # Assuming these are defined in backend2.py
+                     from backend2 import (
+                         load_json_data,
+                         PROFESSIONS_FILE,
+                         TYPES_FILE,
+                         generate_questions_manager,
+                         update_max_questions,
+                         generate_and_save_questions_from_pdf3,
+                         generate_questions_from_job_description,
+                         cleanup
+                     )
+
                      professions_data = load_json_data(PROFESSIONS_FILE)
+                     types_data = load_json_data(TYPES_FILE)
+
                  except (FileNotFoundError, json.JSONDecodeError) as e:
                      print(f"Error loading data from JSON files: {e}")
                      professions_data = []
+                     types_data = []

                  profession_names = [
                      item["profession"] for item in professions_data
+                 ] if professions_data else []
+
+                 interview_types = [
+                     item["type"] for item in types_data
+                 ] if types_data else []

                  with gr.Row():
                      profession_input = gr.Dropdown(
+                         label="Select Profession",
+                         choices=profession_names
                      )
                      interview_type_input = gr.Dropdown(
+                         label="Select Interview Type",
+                         choices=interview_types
                      )

                  num_questions_input = gr.Number(
                  overwrite_input = gr.Checkbox(
                      label="Overwrite all_questions.json?", value=True
                  )
+
                  # Update num_questions_input when interview_type_input changes
                  interview_type_input.change(
                      fn=update_max_questions,
                      inputs=interview_type_input,
                      outputs=num_questions_input,
                  )
+
                  generate_button = gr.Button("Generate Questions")

                  output_text = gr.Textbox(label="Output")
              with gr.Tab("Generate from PDF"):
                  gr.Markdown("### 📄 Upload PDF for Question Generation")
                  pdf_file_input = gr.File(label="Upload PDF File", type="filepath")
+                 num_questions_pdf_input = gr.Number(
+                     label="Number of Questions (1-30)",
+                     value=5,
+                     precision=0,
+                     minimum=1,
+                     maximum=30,
+                 )
+
                  pdf_status_output = gr.Textbox(label="Status", lines=3)
                  pdf_question_output = gr.JSON(label="Generated Questions")
+
                  generate_pdf_button = gr.Button("Generate Questions from PDF")

                  def update_pdf_ui(pdf_path, num_questions):
+                     print(f"[DEBUG] PDF Path: {pdf_path}")
+                     print(f"[DEBUG] Requested Number of Questions: {num_questions}")
+
+                     all_statuses = []
+                     all_questions = []
+                     print(f"[DEBUG] Calling generate_and_save_questions_from_pdf3 with {num_questions}")
                      for status, questions in generate_and_save_questions_from_pdf3(pdf_path, num_questions):
+                         print(f"[DEBUG] Status: {status}, Questions Generated: {len(questions)}")
+                         all_statuses.append(status)
+                         all_questions.append(questions)
+
+                     combined_status = "\n".join(all_statuses)
+                     final_questions = all_questions[-1] if all_questions else []
+
+                     return gr.update(value=combined_status), gr.update(value=final_questions)

                  generate_pdf_button.click(
                      update_pdf_ui,
                      outputs=[pdf_status_output, pdf_question_output],
                  )

+             with gr.Tab("Generate from Job Description"):
+                 gr.Markdown("### 📝 Enter Job Description for Question Generation")
+
+                 job_description_input = gr.Textbox(label="Job Description", placeholder="Type or paste the job description here...", lines=6)
+                 num_questions_job_input = gr.Number(
+                     label="Number of Questions (1-30)",
+                     value=5,
+                     precision=0,
+                     minimum=1,
+                     maximum=30
+                 )
+
+                 job_status_output = gr.Textbox(label="Status", lines=3)
+                 job_question_output = gr.JSON(label="Generated Questions")
+
+                 generate_job_button = gr.Button("Generate Questions from Job Description")
+
+                 def update_job_description_ui(job_description, num_questions):
+                     print(f"[DEBUG] Job Description Length: {len(job_description)} characters")
+                     print(f"[DEBUG] Requested Number of Questions: {num_questions}")

+                     status, questions = generate_questions_from_job_description(job_description, num_questions)
+                     return gr.update(value=status), gr.update(value=questions)
+
+                 generate_job_button.click(
+                     update_job_description_ui,
+                     inputs=[job_description_input, num_questions_job_input],
+                     outputs=[job_status_output, job_question_output],
+                 )


+         # --- Gradio callback functions ---

+         def start_interview():
+             """
+             Resets the chat and provides an initial greeting and first question.
+             Must return a list of {'role':'assistant','content':'...'} messages
+             plus empty text for user_input and path for audio_output.
+             """
+             nonlocal interview_func, questions  # Access questions from the outer scope
+             try:
+                 questions = read_questions_from_json(QUESTIONS_FILE_PATH)  # Reload questions in case file changed
+                 interview_func, initial_message, final_message = conduct_interview(questions)  # Re-init interview func with new questions
+             except Exception as e:
+                 error_message = f"Error reloading questions or setting up interview: {e}. Please check questions.json and API Key."
+                 print(error_message)
+                 tts_path = convert_text_to_speech(error_message)
+                 return [{"role": "assistant", "content": error_message}], "", tts_path  # Return error message to chatbot
+
+             history = []
+             # Combine initial + the first question
+             if questions:
+                 first_q_text = f" Let's begin! Here's your first question: {questions[0]}"
              else:
+                 first_q_text = "No questions loaded. Please check questions.json or generate questions in the Admin Panel."
+
+             combined = initial_message + first_q_text
+             tts_path = convert_text_to_speech(combined)
+
+             # Return one assistant message to the Chatbot
+             history.append({"role": "assistant", "content": combined})
+             return history, "", tts_path

+         def interview_step_wrapper(user_response, audio_response, history):
+             """
+             Wrap the 'interview_func' so we always return the correct format:
+             (list_of_dicts, str, audio_file_path).
+             """
+             new_history, _, audio_path = interview_func(user_response, audio_response, history)
+             return new_history, "", audio_path
+
+         def on_enter_submit(history, user_text):
+             """
+             If user presses Enter in the textbox. Return updated Chatbot history,
+             empty user_input, and any audio.
+             """
+             if not user_text.strip():
+                 # If empty, do nothing
+                 return history, "", None
+             new_history, _, audio_path = interview_func(user_text, None, history)
+             return new_history, "", audio_path
+
+         def clear_chat():
+             """
+             Re-initialize the interview function entirely
+             to start from scratch, clearing the Chatbot.
+             """
+             nonlocal interview_func, initial_message, final_message, questions  # Access questions
+             interview_func, initial_msg, final_msg = conduct_interview(questions)  # Re-init with current questions
+             return [], "", None
+
+         # --- Wire up the event handlers ---

+         # 1) Start button
+         start_btn.click(
+             start_interview,
+             inputs=[],
+             outputs=[chatbot, user_input, audio_output]
          )

+         # 2) Audio: when recording stops
+         audio_input.stop_recording(
+             interview_step_wrapper,
+             inputs=[user_input, audio_input, chatbot],
+             outputs=[chatbot, user_input, audio_output]
+         )

+         # 3) Submit button
+         submit_btn.click(
+             interview_step_wrapper,
+             inputs=[user_input, audio_input, chatbot],
+             outputs=[chatbot, user_input, audio_output]
+         )
+
+         # 4) Pressing Enter in the textbox
+         user_input.submit(
+             on_enter_submit,
+             inputs=[chatbot, user_input],
+             outputs=[chatbot, user_input, audio_output]
+         )
+
+         # 5) Clear button
+         clear_btn.click(
+             clear_chat,
+             inputs=[],
+             outputs=[chatbot, user_input, audio_output]
+         )
+
+     # Launch Gradio (remove `share=True` if it keeps failing)
+     demo.launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         # share=True  # Remove or comment out if you get share-link errors
+     )


  if __name__ == "__main__":
+     main()
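
Both versions of read_questions_from_json expect questions.json to be a non-empty JSON array whose items are used directly as question text (the new start_interview callback, for example, splices questions[0] into the first assistant message). A minimal sketch of producing a compatible file; the sample questions are purely illustrative and not part of this commit:

# Sketch only: write a questions.json that read_questions_from_json can load.
# The sample questions below are hypothetical placeholders.
import json

sample_questions = [
    "Can you walk me through a recent project you are proud of?",
    "How do you approach debugging an issue in production?",
    "What does a good code review look like to you?",
]

with open("questions.json", "w", encoding="utf-8") as f:
    json.dump(sample_questions, f, ensure_ascii=False, indent=4)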