import os
import json
import time
import tempfile
from collections import deque
import gradio as gr
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
from openai import OpenAI
from datetime import datetime  # Import datetime for timestamp

# Load environment variables
load_dotenv()

# Initialize API key status message globally
initial_api_key_status_message = "Checking API Key..."

# Global variable for questions
questions = []  # Declare questions as a global variable

# Function to read questions from JSON
def read_questions_from_json(file_path):
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"The file '{file_path}' does not exist.")
    with open(file_path, 'r', encoding='utf-8') as f:
        questions_list = json.load(f)
    if not questions_list:
        raise ValueError("The JSON file is empty or has invalid content.")
    return questions_list
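# Assumed questions.json shape: a flat JSON array of question strings, e.g.
#   ["Tell me about yourself.", "Why do you want this role?"]
# Each entry is later interpolated directly into the chat prompt as plain text.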

# Function to save interview history to JSON
def save_interview_history(history, filename="interview_history.json"):
    """Saves the interview history to a JSON file."""
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filepath = f"{timestamp}_{filename}"
    try:
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(history, f, ensure_ascii=False, indent=4)
        print(f"Interview history saved to: {filepath}")
    except Exception as e:
        print(f"Error saving interview history: {e}")
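# Each call writes a fresh timestamp-prefixed file (e.g.
# 20240101_120000_interview_history.json), so runs never overwrite each other.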

# Function to convert text to speech (OpenAI's TTS usage, adjust if needed)
def convert_text_to_speech(text):
    start_time = time.time()
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        print("API key is missing, cannot perform text-to-speech.")
        return None
    try:
        client = OpenAI(api_key=api_key)
        response = client.audio.speech.create(model="tts-1", voice="alloy", input=text)
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            for chunk in response.iter_bytes():
                tmp_file.write(chunk)
            temp_audio_path = tmp_file.name
        print(f"DEBUG - Text-to-speech conversion time: {time.time() - start_time:.2f} seconds")
        return temp_audio_path
    except Exception as e:
        print(f"Error during text-to-speech conversion: {e}")
        return None
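# Note: NamedTemporaryFile(delete=False) intentionally leaves the .mp3 on disk
# so Gradio can stream it back to the browser; nothing here deletes it later,
# so long-running sessions accumulate temp files unless cleaned up externally.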

# Function to transcribe audio (OpenAI Whisper usage, adjust if needed)
def transcribe_audio(audio_file_path):
    start_time = time.time()
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        print("API key is missing, cannot perform audio transcription.")
        return None
    try:
        client = OpenAI(api_key=api_key)
        with open(audio_file_path, "rb") as audio_file:
            transcription = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
        print(f"DEBUG - Audio transcription time: {time.time() - start_time:.2f} seconds")
        return transcription.text
    except Exception as e:
        print(f"Error during audio transcription: {e}")
        return None

def check_api_key():
    """Checks if the OpenAI API key is valid."""
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        return "❌ API Key Not Found. Please enter in Admin Panel."
    try:
        client = OpenAI(api_key=api_key)
        client.models.list()  # Simple API call to check if the key is working
        return "✅ API Key Loaded."
    except Exception as e:
        return f"❌ API Key Invalid: {e}"
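# Note: client.models.list() is a real network round-trip, so this check needs
# connectivity and counts against the account like any other authenticated call.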

def conduct_interview(questions, language="English", history_limit=5):
    """
    Sets up a function (interview_step) that handles each round of Q&A.
    Returns (interview_step, initial_message, final_message).
    """
    start_time = time.time()
    openai_api_key = os.getenv("OPENAI_API_KEY")

    initial_message = (
        "👋 Hi there, I'm Sarah, your friendly AI HR assistant! "
        "I'll guide you through a series of interview questions to learn more about you. "
        "Take your time and answer each question thoughtfully."
    )
    final_message_content = (
        "That wraps up our interview. Thank you for your responses—it's been great learning more about you!"
        " I will share the feedback with the HR team, and they will reach out to you soon."
    )

    if not openai_api_key:
        placeholder_message = "⚠️ OpenAI API Key not configured. Please enter your API key in the Admin Panel to start the interview."
        placeholder_audio_path = convert_text_to_speech(placeholder_message)

        def placeholder_interview_step(user_input, audio_input, history):
            history.append({"role": "assistant", "content": placeholder_message})
            return history, "", placeholder_audio_path

        return placeholder_interview_step, initial_message, final_message_content

    # LangChain-based ChatOpenAI
    chat = ChatOpenAI(
        openai_api_key=openai_api_key,
        model="gpt-4o",  # or "gpt-3.5-turbo", etc.
        temperature=0.7,
        max_tokens=750
    )

    conversation_history = deque(maxlen=history_limit)
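    # deque(maxlen=history_limit) is a sliding window: once more than
    # history_limit Q&A pairs have accumulated, the oldest pair is silently
    # dropped, so the model only ever sees the most recent exchanges.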
    system_prompt = (
        f"You are Sarah, an empathetic HR interviewer conducting a technical interview in {language}. "
        "You respond politely, concisely, and provide clarifications if needed. "
        "Ask only ONE question at a time. Wait for the user to respond before asking the next question. "
        "Provide a very brief, positive acknowledgement of the user's response, *then* ask the next question. "
        "Limit follow-up questions to a maximum of ONE per main interview question to keep the interview concise. "
        "If the user provides strange answers, give at most one piece of feedback and continue with the next question. Do not ask more follow-up questions if the answer is strange. "
        "After the last interview question is answered by the user, ask 'Do you have any questions for me?'. "
        "If the user asks questions, answer them concisely and politely. After answering user questions, or if the user says they have no questions, deliver the final message: '{final_message_placeholder}'. "
        "Keep track of the interview stage and manage the conversation flow accordingly."
    )
    current_question_index = [0]  # Store the index in a list so it's mutable in the nested func
    is_interview_finished = [False]  # Use a list for mutability
    interview_transcript = []  # Full interview history, kept for saving to disk
    follow_up_count = [0]  # Counter for follow-up questions within the current main question
    interview_stage = ["questioning"]  # "questioning", "user_questions_prompt", "answering_user_questions", "final_message_stage", "finished"
    user_questions_asked = [False]  # Flag to track if "Do you have any questions?" has been asked
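    # Stage flow, as implemented in interview_step below:
    #   questioning --(last question answered)--> user_questions_prompt
    #   user_questions_prompt --(user says "no")--> finished
    #   user_questions_prompt --(user asks something)--> answering_user_questions
    #   answering_user_questions --(answer delivered)--> user_questions_prompt
    # ("final_message_stage" is listed above but is never actually entered.)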
    updated_system_prompt = system_prompt.replace("{final_message_placeholder}", final_message_content)
    print(f"DEBUG - conduct_interview setup time: {time.time() - start_time:.2f} seconds")
    def interview_step(user_input, audio_input, history):
        """
        Called each time the user clicks submit or finishes an audio recording.
        `history` is a list of {'role': '...', 'content': '...'} messages.
        We must return an updated version of that list in the same format.
        """
        nonlocal current_question_index, is_interview_finished, interview_transcript, follow_up_count, interview_stage, user_questions_asked
        step_start_time = time.time()

        # Check that the API key is configured before making OpenAI calls
        if not os.getenv("OPENAI_API_KEY"):
            api_missing_message = "⚠️ OpenAI API Key not configured. Please enter your API key in the Admin Panel to continue the interview."
            api_missing_audio_path = convert_text_to_speech(api_missing_message)
            history.append({"role": "assistant", "content": api_missing_message})
            return history, "", api_missing_audio_path

        # If there's audio, transcribe it
        if audio_input:
            transcript = transcribe_audio(audio_input)
            user_input = transcript if transcript else user_input  # Prefer the transcription when available

        # If the user typed "exit" or "quit"
        if user_input.strip().lower() in ["exit", "quit"]:
            history.append({
                "role": "assistant",
                "content": "The interview has ended at your request. Thank you for your time!"
            })
            is_interview_finished[0] = True
            save_interview_history(interview_transcript)  # Save history before exit
            return history, "", None

        # If the interview is already finished, do nothing
        if is_interview_finished[0]:
            return history, "", None

        # Add the user's input to the chat history and the full transcript
        history.append({"role": "user", "content": user_input})
        interview_transcript.append({"role": "user", "content": user_input})

        # Label this response for the short (windowed) history; the branches
        # guard against an index-out-of-bounds during the final steps.
        if current_question_index[0] < len(questions) and interview_stage[0] == "questioning":
            current_question = questions[current_question_index[0]]
        elif interview_stage[0] == "answering_user_questions":
            current_question = "User Question"
        else:
            current_question = "End of interview"
        conversation_history.append({
            "question": current_question,
            "answer": user_input
        })
        # Build the prompt from the recent Q&A window
        short_history = "\n".join([
            f"Q: {entry['question']}\nA: {entry['answer']}"
            for entry in conversation_history
        ])

        messages = []
        if interview_stage[0] == "questioning":
            # Normal question flow
            combined_prompt = (
                f"{updated_system_prompt}\n\nPrevious Q&A:\n{short_history}\n\n"
                f"User's input: {user_input}\n\n"
                "Acknowledge the user's answer briefly, then ask the *next* question, unless this was the last question."
            )
            messages = [
                SystemMessage(content=updated_system_prompt),
                HumanMessage(content=combined_prompt),
            ]
        elif interview_stage[0] in ("user_questions_prompt", "answering_user_questions"):
            # Handling the user-questions phase
            combined_prompt = (
                f"{updated_system_prompt}\n\nPrevious Q&A:\n{short_history}\n\n"
                f"User's input (User Question): {user_input}\n\n"
                "Answer the user's question concisely and politely. If the user says they have no questions or similar, then deliver the final message."
            )
            messages = [
                SystemMessage(content=updated_system_prompt),
                HumanMessage(content=combined_prompt),
            ]
        elif interview_stage[0] == "final_message_stage":
            # Should not be reached: the final message is sent directly and the stage becomes "finished"
            pass
        elif interview_stage[0] == "finished":
            return history, "", None  # Interview is finished
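        # Note: the instructions reach the model twice per turn -- once as the
        # SystemMessage and again embedded at the top of combined_prompt.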
        if messages:  # Proceed only if messages were prepared (i.e., not in final_message_stage or finished)
            # Ask ChatOpenAI
            response = chat.invoke(messages)
            response_content = response.content.strip()
            history.append({"role": "assistant", "content": response_content})
            interview_transcript.append({"role": "assistant", "content": response_content})
            # Convert the LLM's answer to speech
            audio_file_path = convert_text_to_speech(response_content)
        else:
            audio_file_path = None

        if interview_stage[0] == "questioning":
            # Advance to the next question or handle the end of the question list
            follow_up_count[0] = 0  # Reset the follow-up counter for the next main question
            if current_question_index[0] < len(questions) - 1:
                current_question_index[0] += 1
                print(f"DEBUG - question index {current_question_index[0]}")
                print("DEBUG - Moving to next main question.")
                print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
                return history, "", audio_file_path  # Return the current audio
            else:
                # Last question answered; ask "Do you have any questions?"
                if not user_questions_asked[0]:
                    user_questions_prompt_message = "Thank you for your answer. Do you have any questions for me?"
                    user_questions_audio_path = convert_text_to_speech(user_questions_prompt_message)
                    history.append({"role": "assistant", "content": user_questions_prompt_message})
                    interview_transcript.append({"role": "assistant", "content": user_questions_prompt_message})
                    interview_stage[0] = "user_questions_prompt"
                    user_questions_asked[0] = True  # Ensure this prompt is only asked once
                    print("DEBUG - Asked 'Do you have any questions?'")
                    print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
                    return history, "", user_questions_audio_path
                else:
                    # Should not be reached in the normal flow; fall through to handle user questions or finalize below
                    pass
        if interview_stage[0] == "user_questions_prompt":
            # Check whether the user has questions or declined
            if user_input.strip().lower() in ["no", "no questions", "none", "nothing", "that's all", "no, thank you"]:
                final_audio_path = convert_text_to_speech(final_message_content)
                history.append({"role": "assistant", "content": final_message_content})
                interview_transcript.append({"role": "assistant", "content": final_message_content})
                interview_stage[0] = "finished"
                is_interview_finished[0] = True
                save_interview_history(interview_transcript)  # Save history at the end
                print("DEBUG - Interview finished after user said no questions.")
                print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
                return history, "", final_audio_path
            else:
                # User asked a question; move to the answering stage
                interview_stage[0] = "answering_user_questions"
                print("DEBUG - User asked a question, moving to answering stage.")
                print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
                return history, "", audio_file_path  # The AI's answer was already produced in the 'messages' block above
        elif interview_stage[0] == "answering_user_questions":
            # After answering a user question, go back to user_questions_prompt to allow more questions or finalize
            interview_stage[0] = "user_questions_prompt"
            print("DEBUG - Answered user question, back to user_questions_prompt.")
            print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
            return history, "", audio_file_path  # Already responded in the 'messages' block
        elif interview_stage[0] == "final_message_stage":
            # Redundant stage; the final message is sent directly when there are no more questions
            pass
        elif interview_stage[0] == "finished":
            return history, "", None  # Interview already finished

        print(f"DEBUG - Interview step time: {time.time() - step_start_time:.2f} seconds")
        return history, "", audio_file_path

    # Return the step function plus the initial/final text
    return interview_step, initial_message, final_message_content
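# Usage sketch (hypothetical question list, for illustration only):
#   step, greeting, farewell = conduct_interview(["Tell me about yourself."])
#   history, _, audio_path = step("I'm a backend developer.", None, [])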

def main():
    QUESTIONS_FILE_PATH = "questions.json"
    try:
        global questions  # Use the global questions variable
        questions = read_questions_from_json(QUESTIONS_FILE_PATH)
        num_questions = len(questions)
        print(f"Loaded {num_questions} questions from {QUESTIONS_FILE_PATH}")
    except Exception as e:
        print(f"Error reading questions: {e}")
        return

    global initial_api_key_status_message
    initial_api_key_status_message = check_api_key()  # Check the API key and update the status

    interview_func, initial_message, final_message = conduct_interview(questions)  # Initialize even if the API key is missing
    css = """
    .contain { display: flex; flex-direction: column; }
    .gradio-container { height: 100vh !important; overflow-y: auto; }
    #component-0 { height: 100%; }
    .chatbot { flex-grow: 1; overflow: auto; height: 650px; }
    .user > div > .message { background-color: #dcf8c6 !important }
    .bot > div > .message { background-color: #f7f7f8 !important }
    """
    # Build the Gradio interface
    with gr.Blocks(css=css) as demo:
        gr.Markdown(
            "<h1 style='text-align:center;'>👋 AI HR Interview Assistant</h1>"
        )
        gr.Markdown(
            "I will ask you a series of questions. Please answer honestly and thoughtfully. "
            "When you are ready, click **Start Interview** to begin."
        )

        start_btn = gr.Button("Start Interview", variant="primary")

        chatbot = gr.Chatbot(
            label="Interview Chat",
            height=650,
            type='messages'  # handlers must return a list of dicts: {"role": ..., "content": ...}
        )
        audio_input = gr.Audio(
            sources=["microphone"],
            type="filepath",
            label="Record Your Answer"
        )
        user_input = gr.Textbox(
            label="Your Response",
            placeholder="Type your answer here or use the microphone...",
            lines=1,
        )
        audio_output = gr.Audio(label="Response Audio", autoplay=True)

        with gr.Row():
            submit_btn = gr.Button("Submit", variant="primary")
            clear_btn = gr.Button("Clear Chat")
        # Admin Panel Tab
        with gr.Tab("Admin Panel", id="admin_tab"):
            with gr.Tab("API Key Settings"):
                gr.Markdown("### OpenAI API Key Configuration")
                api_key_input = gr.Textbox(label="Enter your OpenAI API Key", type="password", placeholder="••••••••••••••••••••••••••••••••")
                api_key_status_output = gr.Textbox(label="API Key Status", value=initial_api_key_status_message, interactive=False)
                update_api_key_button = gr.Button("Update API Key")
                gr.Markdown("*This application does not store your API key. It is used only for this session and is not persisted when you close the app.*")

                def update_api_key(api_key):
                    # interview_func/initial_message/final_message are bound in
                    # main(), so nonlocal (not global) is required here for the
                    # other callbacks to see the re-initialized function.
                    nonlocal interview_func, initial_message, final_message
                    global initial_api_key_status_message
                    os.environ["OPENAI_API_KEY"] = api_key  # Caution: os.environ changes last only for this session
                    initial_api_key_status_message = check_api_key()  # Update the status immediately after the key is entered
                    interview_func, initial_message, final_message = conduct_interview(questions)  # Re-init the interview function
                    return initial_api_key_status_message

                update_api_key_button.click(
                    update_api_key,
                    inputs=[api_key_input],
                    outputs=[api_key_status_output],
                )
            with gr.Tab("Generate Questions"):
                try:
                    # These helpers are expected to be defined in backend3.py
                    from backend3 import (
                        load_json_data,
                        PROFESSIONS_FILE,
                        TYPES_FILE,
                        generate_questions_manager,
                        update_max_questions,
                        generate_and_save_questions_from_pdf3,
                        generate_questions_from_job_description,
                        cleanup
                    )
                    professions_data = load_json_data(PROFESSIONS_FILE)
                    types_data = load_json_data(TYPES_FILE)
                except (FileNotFoundError, json.JSONDecodeError) as e:
                    print(f"Error loading data from JSON files: {e}")
                    professions_data = []
                    types_data = []

                profession_names = [
                    item["profession"] for item in professions_data
                ] if professions_data else []
                interview_types = [
                    item["type"] for item in types_data
                ] if types_data else []
                with gr.Row():
                    profession_input = gr.Dropdown(
                        label="Select Profession",
                        choices=profession_names
                    )
                    interview_type_input = gr.Dropdown(
                        label="Select Interview Type",
                        choices=interview_types
                    )

                num_questions_input = gr.Number(
                    label="Number of Questions (1-20)",
                    value=5,
                    precision=0,
                    minimum=1,
                    maximum=20,
                )
                overwrite_input = gr.Checkbox(
                    label="Overwrite all_questions.json?", value=True
                )

                # Update num_questions_input when interview_type_input changes
                interview_type_input.change(
                    fn=update_max_questions,
                    inputs=interview_type_input,
                    outputs=num_questions_input,
                )

                generate_button = gr.Button("Generate Questions")
                output_text = gr.Textbox(label="Output")
                question_output = gr.JSON(label="Generated Questions")

                generate_button.click(
                    generate_questions_manager,
                    inputs=[
                        profession_input,
                        interview_type_input,
                        num_questions_input,
                        overwrite_input,
                    ],
                    outputs=[output_text, question_output],
                )
            with gr.Tab("Generate from PDF"):
                gr.Markdown("### 📄 Upload PDF for Question Generation")
                pdf_file_input = gr.File(label="Upload PDF File", type="filepath")
                num_questions_pdf_input = gr.Number(
                    label="Number of Questions (1-30)",
                    value=5,
                    precision=0,
                    minimum=1,
                    maximum=30,
                )
                pdf_status_output = gr.Textbox(label="Status", lines=3)
                pdf_question_output = gr.JSON(label="Generated Questions")
                generate_pdf_button = gr.Button("Generate Questions from PDF")

                def update_pdf_ui(pdf_path, num_questions):
                    print(f"[DEBUG] PDF Path: {pdf_path}")
                    print(f"[DEBUG] Requested Number of Questions: {num_questions}")
                    all_statuses = []
                    all_questions = []
                    print(f"[DEBUG] Calling generate_and_save_questions_from_pdf3 with {num_questions}")
                    # The loop variable is named `generated` to avoid shadowing the
                    # module-level questions list.
                    for status, generated in generate_and_save_questions_from_pdf3(pdf_path, num_questions):
                        print(f"[DEBUG] Status: {status}, Questions Generated: {len(generated)}")
                        all_statuses.append(status)
                        all_questions.append(generated)
                    combined_status = "\n".join(all_statuses)
                    final_questions = all_questions[-1] if all_questions else []
                    return gr.update(value=combined_status), gr.update(value=final_questions)

                generate_pdf_button.click(
                    update_pdf_ui,
                    inputs=[pdf_file_input, num_questions_pdf_input],
                    outputs=[pdf_status_output, pdf_question_output],
                )
            with gr.Tab("Generate from Job Description"):
                gr.Markdown("### 📝 Enter Job Description for Question Generation")
                job_description_input = gr.Textbox(label="Job Description", placeholder="Type or paste the job description here...", lines=6)
                num_questions_job_input = gr.Number(
                    label="Number of Questions (1-30)",
                    value=5,
                    precision=0,
                    minimum=1,
                    maximum=30
                )
                job_status_output = gr.Textbox(label="Status", lines=3)
                job_question_output = gr.JSON(label="Generated Questions")
                generate_job_button = gr.Button("Generate Questions from Job Description")

                def update_job_description_ui(job_description, num_questions):
                    print(f"[DEBUG] Job Description Length: {len(job_description)} characters")
                    print(f"[DEBUG] Requested Number of Questions: {num_questions}")
                    # `generated` avoids shadowing the module-level questions list
                    status, generated = generate_questions_from_job_description(job_description, num_questions)
                    return gr.update(value=status), gr.update(value=generated)

                generate_job_button.click(
                    update_job_description_ui,
                    inputs=[job_description_input, num_questions_job_input],
                    outputs=[job_status_output, job_question_output],
                )
        # --- Gradio callback functions ---
        def start_interview():
            """
            Resets the chat and provides the initial greeting and first question.
            Must return a list of {'role': 'assistant', 'content': '...'} messages
            plus empty text for user_input and an audio path for audio_output.
            """
            # interview_func is bound in main(), so nonlocal (not global) is
            # required for the other callbacks to see the re-initialized function.
            nonlocal interview_func
            global questions
            current_api_key_status = check_api_key()  # Re-check the API key right before starting
            if not current_api_key_status.startswith("✅"):
                error_message = "Please set a valid OpenAI API Key in the Admin Panel before starting the interview."
                tts_path = convert_text_to_speech(error_message)
                return [{"role": "assistant", "content": error_message}], "", tts_path
            try:
                # Reload the questions in case the file changed, then re-init the interview
                questions = read_questions_from_json(QUESTIONS_FILE_PATH)
                interview_func, initial_message, final_message = conduct_interview(questions)
            except Exception as e:
                error_message = f"Error reloading questions or setting up interview: {e}. Please check questions.json and API Key."
                print(error_message)
                tts_path = convert_text_to_speech(error_message)
                return [{"role": "assistant", "content": error_message}], "", tts_path
            history = []
            # Combine the greeting with the first question
            if questions:
                first_q_text = f" Let's begin! Here's your first question: {questions[0]}"
            else:
                first_q_text = "No questions loaded. Please check questions.json or generate questions in the Admin Panel."
            combined = initial_message + first_q_text
            tts_path = convert_text_to_speech(combined)
            # Return one assistant message to the Chatbot
            history.append({"role": "assistant", "content": combined})
            return history, "", tts_path
        def interview_step_wrapper(user_response, audio_response, history):
            """
            Wrap `interview_func` so we always return the correct format:
            (list_of_dicts, str, audio_file_path).
            """
            new_history, _, audio_path = interview_func(user_response, audio_response, history)
            return new_history, "", audio_path

        def on_enter_submit(history, user_text):
            """
            Called when the user presses Enter in the textbox. Returns the updated
            Chatbot history, an emptied user_input, and any audio.
            """
            if not user_text.strip():
                # If empty, do nothing
                return history, "", None
            new_history, _, audio_path = interview_func(user_text, None, history)
            return new_history, "", audio_path

        def clear_chat():
            """
            Re-initialize the interview function entirely
            to start from scratch, clearing the Chatbot.
            """
            nonlocal interview_func  # Bound in main(); nonlocal so the other callbacks see the reset
            interview_func, initial_msg, final_msg = conduct_interview(questions)  # Re-init with the current questions
            return [], "", None
        # --- Wire up the event handlers ---
        # 1) Start button
        start_btn.click(
            start_interview,
            inputs=[],
            outputs=[chatbot, user_input, audio_output]
        )

        # 2) Audio: when recording stops
        audio_input.stop_recording(
            interview_step_wrapper,
            inputs=[user_input, audio_input, chatbot],
            outputs=[chatbot, user_input, audio_output]
        )

        # 3) Submit button
        submit_btn.click(
            interview_step_wrapper,
            inputs=[user_input, audio_input, chatbot],
            outputs=[chatbot, user_input, audio_output]
        )

        # 4) Pressing Enter in the textbox
        user_input.submit(
            on_enter_submit,
            inputs=[chatbot, user_input],
            outputs=[chatbot, user_input, audio_output]
        )

        # 5) Clear button
        clear_btn.click(
            clear_chat,
            inputs=[],
            outputs=[chatbot, user_input, audio_output]
        )

    # Launch Gradio (remove `share=True` if it keeps failing)
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        # share=True  # Remove or comment out if you get share-link errors
    )


if __name__ == "__main__":
    main()