ruslanmv committed on
Commit
ea0bc5e
·
verified ·
1 Parent(s): 78b8b85

Upload backend3.py

Files changed (1)
  1. backend3.py +453 -0
backend3.py ADDED
@@ -0,0 +1,453 @@
+import gradio as gr
+import tempfile
+import os
+import json
+from io import BytesIO
+from collections import deque
+from dotenv import load_dotenv
+from langchain_openai import ChatOpenAI
+from langchain.schema import HumanMessage, SystemMessage
+from langchain.chains import LLMChain
+from langchain.prompts import PromptTemplate
+from openai import OpenAI
+import time
+
+# Project-local imports, grouped by module.
+from generatorgr import (
+    generate_and_save_questions as generate_questions_manager,
+    update_max_questions,
+)
+from generator import (
+    PROFESSIONS_FILE,
+    TYPES_FILE,
+    OUTPUT_FILE,
+    load_json_data,
+    generate_questions,  # Keep only if it actually exists in generator.py.
+)
+from splitgpt import (
+    generate_and_save_questions_from_pdf3,
+    generate_questions_from_job_description,
+)
+# ai_config.py is no longer imported directly; its functions are redefined
+# below so that a missing API key is handled gracefully.
+# from ai_config import convert_text_to_speech  # Redundant; redefined below.
+from knowledge_retrieval import get_next_response, get_initial_question
+from prompt_instructions import get_interview_initial_message_hr
+from settings import language
+from utils import save_interview_history
+from tools import store_interview_report, read_questions_from_json
+
+load_dotenv()  # Load .env variables.
+
+class InterviewState:
+    """Manages the state of the interview."""
+
+    def __init__(self):
+        self.reset()
+
+    def reset(self, voice="alloy"):
+        self.question_count = 0
+        # History format: a list of [user_msg, bot_msg] pairs (Gradio Chatbot style).
+        self.interview_history = []
+        self.selected_interviewer = voice
+        self.interview_finished = False
+        self.audio_enabled = True
+        self.temp_audio_files = []
+        self.initial_audio_path = None
+        self.interview_chain = None
+        self.report_chain = None
+        self.current_questions = []
+        self.history_limit = 5  # Cap on remembered turns, to keep prompts short.
+
+    def get_voice_setting(self):
+        return self.selected_interviewer
+
+
+interview_state = InterviewState()
+
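+# NOTE: a single module-level InterviewState is shared by every Gradio
+# callback. This is simple and adequate for one interview at a time, but
+# concurrent sessions would share (and clobber) this state.
+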
+def initialize_chains():
+    """Initializes the LangChain LLM chains."""
+    openai_api_key = os.getenv("OPENAI_API_KEY")
+    if not openai_api_key:
+        print("OpenAI API key not found. Chains will not be initialized.")
+        interview_state.interview_chain = None  # None signals "not initialized".
+        interview_state.report_chain = None
+        return False  # Chains were not initialized.
+    try:
+        llm = ChatOpenAI(
+            openai_api_key=openai_api_key, model="gpt-4o", temperature=0.7, max_tokens=750
+        )
+        interview_prompt_template = """
+        You are Sarah, an empathetic HR interviewer conducting a technical interview in {language}.
+        Current Question: {current_question}
+        Previous conversation history:
+        {history}
+        User's response to current question: {user_input}
+        Your response:
+        """
+        interview_prompt = PromptTemplate(
+            input_variables=["language", "current_question", "history", "user_input"],
+            template=interview_prompt_template,
+        )
+        interview_state.interview_chain = LLMChain(prompt=interview_prompt, llm=llm)
+
+        report_prompt_template = """
+        You are an HR assistant tasked with generating a concise report based on the following interview transcript in {language}:
+        {interview_transcript}
+        Summarize the candidate's performance, highlighting strengths and areas for improvement. Keep it to 3-5 sentences.
+        Report:
+        """
+        report_prompt = PromptTemplate(
+            input_variables=["language", "interview_transcript"], template=report_prompt_template
+        )
+        interview_state.report_chain = LLMChain(prompt=report_prompt, llm=llm)
+        return True  # Chains initialized successfully.
+    except Exception as e:
+        print(f"Error initializing chains: {e}")
+        interview_state.interview_chain = None
+        interview_state.report_chain = None
+        return False  # Chains were not initialized.
+
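+# Illustrative call to the chain built above (a sketch; the values shown are
+# placeholders, not part of the real app flow). LLMChain.invoke returns a
+# dict whose "text" key holds the model output:
+#
+#   result = interview_state.interview_chain.invoke({
+#       "language": "English",
+#       "current_question": "Tell me about a project you led.",
+#       "history": "",
+#       "user_input": "I led a migration to a new CI system...",
+#   })
+#   print(result["text"])
+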
+def generate_report(report_chain, history, language):
+    """Generates a concise interview report."""
+    if report_chain is None:
+        return "Report generation is unavailable because the API key is not set."
+    # Convert the Gradio-style history to a plain-text transcript.
+    transcript = ""
+    for user_msg, bot_msg in history:
+        transcript += f"User: {user_msg}\nAssistant: {bot_msg}\n"
+    report = report_chain.invoke({"language": language, "interview_transcript": transcript})
+    return report["text"]
+
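+# The transcript handed to the report chain is plain text of the form
+# (illustrative):
+#
+#   User: I have five years of Python experience.
+#   Assistant: Great, can you describe a challenging bug you fixed?
+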
+def reset_interview_action(voice):
+    """Resets the interview state and prepares the initial message."""
+    interview_state.reset(voice)
+    if not initialize_chains():  # Initialize the chains and check for success.
+        initial_message_text = "OpenAI API key is not configured. Please set it in the Admin Panel to start the interview with full functionality."
+        initial_audio_path = convert_text_to_speech_updated(initial_message_text)  # Still attempt TTS for the error message.
+        return (
+            [[None, initial_message_text]],  # [user_msg, bot_msg]; the user slot starts as None.
+            gr.Audio(value=initial_audio_path, autoplay=True) if initial_audio_path else None,
+            gr.Textbox(interactive=False),  # Disable the textbox while the API key is missing.
+        )
+
+    print(f"[DEBUG] Interview reset. Voice: {voice}")
+    initial_message_text = get_interview_initial_message_hr(5)
+    # Convert to speech and save to a temporary file.
+    initial_audio_path = convert_text_to_speech_updated(initial_message_text, voice)
+
+    # Return values in the format Gradio expects.
+    return (
+        [[None, initial_message_text]],  # [user_msg, bot_msg]; the user slot starts as None.
+        gr.Audio(value=initial_audio_path, autoplay=True) if initial_audio_path else None,
+        gr.Textbox(interactive=True),  # Enable the textbox.
+    )
+
+def start_interview():
+    """Starts the interview (used by the Gradio button)."""
+    return reset_interview_action(interview_state.selected_interviewer)
+
+def construct_history_string(history):
+    """Constructs a history string for the LangChain prompt."""
+    history_str = ""
+    for user_msg, bot_msg in history:
+        history_str += f"User: {user_msg}\nAssistant: {bot_msg}\n"
+    return history_str
+
+def bot_response(chatbot, user_message_text):
+    """Handles the bot's response logic."""
+    voice = interview_state.get_voice_setting()
+    history_str = construct_history_string(chatbot)
+
+    if interview_state.interview_chain is None:  # Chain not initialized.
+        chatbot.append([user_message_text, "Please set up the OpenAI API key in the Admin Panel to continue the interview."])
+        return chatbot, None, gr.File(visible=False)  # No audio or report without an initialized chain.
+
+    if interview_state.question_count < len(interview_state.current_questions):
+        current_question = interview_state.current_questions[interview_state.question_count]
+        response_obj = interview_state.interview_chain.invoke(
+            {
+                "language": language,
+                "current_question": current_question,
+                "history": history_str,
+                "user_input": user_message_text,
+            }
+        )
+        response = response_obj["text"]
+        interview_state.question_count += 1
+        # Text-to-speech.
+        temp_audio_path = convert_text_to_speech_updated(response, voice)
+
+        # Update the chatbot history in the correct format.
+        chatbot.append([user_message_text, response])
+        return chatbot, gr.Audio(value=temp_audio_path, autoplay=True) if temp_audio_path else None, gr.File(visible=False)
+
+    else:  # Interview finished.
+        interview_state.interview_finished = True
+        conclusion_message = "Thank you for your time. The interview is complete. Please review your report."
+        # Text-to-speech for the conclusion.
+        temp_conclusion_audio_path = convert_text_to_speech_updated(conclusion_message, voice)
+
+        # Update the chatbot history.
+        chatbot.append([user_message_text, conclusion_message])
+        # Generate and save the report.
+        report_content = generate_report(
+            interview_state.report_chain, chatbot, language
+        )  # Pass the Gradio history.
+        txt_path = save_interview_history(
+            [f"User: {user}\nAssistant: {bot}" for user, bot in chatbot], language
+        )  # Create a plain-text history.
+        report_file_path = store_interview_report(report_content)
+        print(f"[DEBUG] Interview report saved at: {report_file_path}")
+        return (
+            chatbot,
+            gr.Audio(value=temp_conclusion_audio_path, autoplay=True) if temp_conclusion_audio_path else None,
+            gr.File(visible=True, value=txt_path),
+        )
+
+def convert_text_to_speech_updated(text, voice="alloy"):
+    """Converts text to speech and returns the file path; handles a missing API key."""
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        print("API key is missing, text-to-speech disabled.")
+        return None
+
+    try:
+        client = OpenAI(api_key=api_key)
+        response = client.audio.speech.create(model="tts-1", voice=voice, input=text)
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
+            for chunk in response.iter_bytes():
+                tmp_file.write(chunk)
+            temp_audio_path = tmp_file.name
+        interview_state.temp_audio_files.append(temp_audio_path)  # Track the file so cleanup() can delete it.
+        return temp_audio_path
+    except Exception as e:
+        print(f"Error in text-to-speech: {e}")
+        return None
+
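+# Note: newer openai clients also expose
+# client.audio.speech.with_streaming_response.create(...) for streaming audio
+# straight to disk; the buffered iter_bytes() loop above works as well.
+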
+def transcribe_audio(audio_file_path):
+    """Transcribes audio to text; handles a missing API key."""
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        print("API key is missing, audio transcription disabled.")
+        return ""  # Transcription unavailable.
+
+    try:
+        client = OpenAI(api_key=api_key)
+        with open(audio_file_path, "rb") as audio_file:
+            transcription = client.audio.transcriptions.create(
+                model="whisper-1", file=audio_file
+            )
+        return transcription.text
+    except Exception as e:
+        print(f"Error in transcription: {e}")
+        return ""
+
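+# transcribe_audio returns "" on any failure, so callers can treat a falsy
+# result as "could not understand" (see interview_step below).
+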
+def conduct_interview_updated(questions, language="English", history_limit=5):
+    """Conducts the interview (LangChain/OpenAI); handles a missing API key."""
+    openai_api_key = os.getenv("OPENAI_API_KEY")
+    if not openai_api_key:
+        # Return a placeholder interview step if the API key is missing.
+        initial_message = (
+            "⚠️ OpenAI API Key not configured. Please enter your API key in the Admin Panel "
+            "to start the interview with full functionality. Text responses will be displayed, "
+            "but advanced features are disabled."
+        )
+        placeholder_audio_path = convert_text_to_speech_updated(initial_message)
+
+        def placeholder_interview_step(user_input, audio_input, history):
+            history.append([None, initial_message])  # Bot message only.
+            return history, "", placeholder_audio_path, gr.Textbox(interactive=False)  # Textbox disabled.
+
+        return placeholder_interview_step, initial_message, "API key missing"  # Placeholder plus status flag.
+
+    chat = ChatOpenAI(
+        openai_api_key=openai_api_key, model="gpt-4o", temperature=0.7, max_tokens=750
+    )
+    conversation_history = deque(maxlen=history_limit)  # For LangChain, not Gradio.
+    system_prompt = (
+        f"You are Sarah, an empathetic HR interviewer conducting a technical interview in {language}. "
+        "Respond to user follow-up questions politely and concisely. Keep responses brief."
+    )
+    interview_data = []  # Store Q&A for potential later use.
+    current_question_index = [0]
+    is_interview_finished = [False]
+    initial_message = (
+        "👋 Hi there, I'm Sarah, your friendly AI HR assistant! "
+        "I'll guide you through a series of interview questions. "
+        "Take your time."
+    )
+    final_message = "That wraps up our interview. Thank you for your responses!"
+
+    def interview_step(user_input, audio_input, history):
+        nonlocal current_question_index, is_interview_finished
+        if is_interview_finished[0]:
+            return history, "", None, gr.Textbox(interactive=False)  # No further interaction.
+
+        if audio_input:
+            user_input = transcribe_audio(audio_input)
+            if not user_input:
+                history.append(["", "I couldn't understand your audio. Could you please repeat or type?"])  # Empty string keeps the user slot non-None.
+                audio_path = convert_text_to_speech_updated(history[-1][1])
+                return history, "", audio_path, gr.Textbox(interactive=True)
+
+        if user_input.lower() in ["exit", "quit"]:
+            history.append(["", "The interview has ended. Thank you."])  # Empty string keeps the user slot non-None.
+            is_interview_finished[0] = True
+            return history, "", None, gr.Textbox(interactive=False)  # Disable the textbox after exit.
+
+        # Crucial: add the user input to history *before* getting the bot response.
+        history.append([user_input, ""])  # Bot response pending.
+        question_text = questions[current_question_index[0]]
+
+        # Prepare history for LangChain (not the Gradio chatbot format).
+        history_content = "\n".join(
+            [
+                f"Q: {entry['question']}\nA: {entry['answer']}"
+                for entry in conversation_history
+            ]
+        )
+        combined_prompt = (
+            f"{system_prompt}\n\nPrevious conversation history:\n{history_content}\n\n"
+            f"Current question: {question_text}\nUser's input: {user_input}\n\n"
+            "Respond warmly."
+        )
+        messages = [
+            SystemMessage(content=system_prompt),
+            HumanMessage(content=combined_prompt),
+        ]
+        response = chat.invoke(messages)
+        response_content = response.content.strip()
+        audio_path = convert_text_to_speech_updated(response_content)
+        conversation_history.append({"question": question_text, "answer": user_input})
+        interview_data.append({"question": question_text, "answer": user_input})
+
+        # Update the Gradio-compatible history (crucial for display).
+        history[-1][1] = response_content  # Fill in the bot's response.
+
+        interactive_textbox = gr.Textbox(interactive=True)  # Keep the textbox interactive in most steps.
+
+        if current_question_index[0] + 1 < len(questions):
+            current_question_index[0] += 1
+            next_question = f"Next question: {questions[current_question_index[0]]}"
+            next_question_audio_path = convert_text_to_speech_updated(next_question)
+            # Don't add the "Next question:" prompt to the displayed history;
+            # the bot says it aloud, and adding it here would duplicate the entry.
+            return history, "", next_question_audio_path, interactive_textbox
+        else:
+            final_message_audio = convert_text_to_speech_updated(final_message)
+            history.append([None, final_message])  # Final message, no user input.
+            is_interview_finished[0] = True
+            interactive_textbox = gr.Textbox(interactive=False)  # Disable the textbox at the end.
+            return history, "", final_message_audio, interactive_textbox
+
+    return interview_step, initial_message, "OK"  # Third value is the API status flag checked by the caller.
+
+
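+# conduct_interview_updated returns a closure so per-interview state (the
+# question index and the deque of prior Q&A) lives in the enclosing scope
+# rather than in globals. Illustrative use (a sketch, not app wiring):
+#
+#   step, first_msg, status = conduct_interview_updated(["Q1", "Q2"])
+#   history, _, audio, box = step("My answer to Q1", None, [])
+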
+def launch_candidate_app_updated():
+    """Launches the Gradio app for candidates."""
+    QUESTIONS_FILE_PATH = "questions.json"
+    try:
+        questions = read_questions_from_json(QUESTIONS_FILE_PATH)
+        if not questions:
+            raise ValueError("No questions found.")
+    except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
+        print(f"Error loading questions: {e}")
+        with gr.Blocks() as error_app:
+            gr.Markdown(f"# Error: {e}")
+        return error_app
+
+    interview_func, initial_message, api_status = conduct_interview_updated(questions)  # Get the API status flag.
+
+    def start_interview_ui():
+        """Starts the interview."""
+        history = []
+        if api_status == "API key missing":
+            initial_combined = initial_message  # The initial message already flags the missing key.
+            textbox_interactive = gr.Textbox(interactive=False)
+        else:
+            initial_combined = (
+                initial_message + " Let's begin! Here's the first question: " + questions[0]
+            )
+            textbox_interactive = gr.Textbox(interactive=True)
+
+        initial_audio_path = convert_text_to_speech_updated(initial_combined)
+        history.append(["", initial_combined])  # [user, bot]; empty string for the user slot.
+        return history, "", initial_audio_path, textbox_interactive
+
+    def clear_interview_ui():
+        """Clears the interview and resets."""
+        # Re-create the closure to clear the interview history.
+        nonlocal interview_func, initial_message, api_status
+        interview_func, initial_message, api_status = conduct_interview_updated(questions)
+        textbox_interactive = gr.Textbox(interactive=(api_status != "API key missing"))
+        return [], "", None, textbox_interactive
+
+    def interview_step_wrapper(user_response, audio_response, history):
+        """Wrapper for the interview step function."""
+        history, user_text, audio_path, new_textbox_interactive = interview_func(user_response, audio_response, history)
+        return history, "", audio_path, new_textbox_interactive
+
+    def on_enter_submit(history, user_response):
+        """Handles submission when Enter is pressed."""
+        if not user_response.strip():
+            return history, "", None, gr.Textbox(interactive=True)  # Prevent empty submissions.
+        history, _, audio_path, new_textbox_interactive = interview_step_wrapper(
+            user_response, None, history
+        )  # No audio on Enter.
+        return history, "", audio_path, new_textbox_interactive
+
+    with gr.Blocks(title="AI HR Interview Assistant") as candidate_app:
+        gr.Markdown(
+            "<h1 style='text-align: center;'>👋 Welcome to Your AI HR Interview Assistant</h1>"
+        )
+        start_btn = gr.Button("Start Interview", variant="primary")
+        chatbot = gr.Chatbot(label="Interview Chat", height=650)
+        audio_input = gr.Audio(
+            sources=["microphone"], type="filepath", label="Record Your Answer"
+        )
+        user_input = gr.Textbox(
+            label="Your Response",
+            placeholder="Type your answer here or use the microphone...",
+            lines=1,
+            interactive=True,  # Interactivity is toggled by the handlers below.
+        )
+        audio_output = gr.Audio(label="Response Audio", autoplay=True)
+
+        with gr.Row():
+            submit_btn = gr.Button("Submit", variant="primary")
+            clear_btn = gr.Button("Clear Chat")
+
+        # user_input appears twice in each outputs list: once for its value
+        # and once for its interactive state.
+        start_btn.click(
+            start_interview_ui, inputs=[], outputs=[chatbot, user_input, audio_output, user_input]
+        )
+        audio_input.stop_recording(
+            interview_step_wrapper,
+            inputs=[user_input, audio_input, chatbot],
+            outputs=[chatbot, user_input, audio_output, user_input],
+        )
+        submit_btn.click(
+            interview_step_wrapper,
+            inputs=[user_input, audio_input, chatbot],
+            outputs=[chatbot, user_input, audio_output, user_input],
+        )
+        user_input.submit(
+            on_enter_submit,
+            inputs=[chatbot, user_input],
+            outputs=[chatbot, user_input, audio_output, user_input],
+        )
+        clear_btn.click(
+            clear_interview_ui, inputs=[], outputs=[chatbot, user_input, audio_output, user_input]
+        )
+
+    return candidate_app
+
+# --- (End of Candidate Interview Implementation) ---
+
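+# Launch sketch (an assumption about the entry point, which lives outside
+# this file):
+#
+#   app = launch_candidate_app_updated()
+#   app.launch()
+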
+def cleanup():
+    """Cleans up temporary audio files."""
+    for audio_file in interview_state.temp_audio_files:
+        try:
+            if os.path.exists(audio_file):
+                os.unlink(audio_file)
+        except Exception as e:
+            print(f"Error deleting file {audio_file}: {e}")
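+
+# cleanup() is defined but never registered in this file. One way to run it
+# on process exit (an assumption, not part of the upload):
+#
+#   import atexit
+#   atexit.register(cleanup)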