Upload generatorgr.py
generatorgr.py  CHANGED  (+125 -121)
@@ -1,121 +1,125 @@
-import gradio as gr
-import json
-import time
-import os
-
-from generator import PROFESSIONS_FILE, TYPES_FILE, OUTPUT_FILE
-from generator import generate_questions, load_json_data, save_questions_to_file
-
-# Load professions and interview types from JSON files
-try:
-    professions_data = load_json_data(PROFESSIONS_FILE)
-    types_data = load_json_data(TYPES_FILE)
-except (FileNotFoundError, json.JSONDecodeError) as e:
-    print(f"Error loading data from JSON files: {e}")
-    professions_data = []
-    types_data = []
-
-# Extract profession names and interview types for the dropdown menus
-profession_names = [item["profession"] for item in professions_data]
-interview_types = [item["type"] for item in types_data]
-
-# Define path for the questions.json file
-QUESTIONS_FILE = "questions.json"
-
-
-def generate_and_save_questions(profession, interview_type, num_questions, overwrite=True, progress=gr.Progress()):
-    """
-    Generates questions using the generate_questions function and saves them to JSON files.
-    Provides progress updates.
-    """
-    profession_info = next(
-        (item for item in professions_data if item["profession"] == profession), None
-    )
-    interview_type_info = next(
-        (item for item in types_data if item["type"] == interview_type), None
-    )
-
-    if profession_info is None or interview_type_info is None:
-        return "Error: Invalid profession or interview type selected.", None
-
-    description = profession_info["description"]
-    max_questions = min(int(num_questions), 20)  # Ensure max is 20
-
-    progress(0, desc="Starting question generation...")
-
-    questions = generate_questions(
-        profession, interview_type, description, max_questions
-    )
-
-    progress(0.5, desc=f"Generated {len(questions)} questions. Saving...")
-
-    # Save the generated questions to the all_questions.json file
-
-    all_questions_entry = {
-        "profession": profession,
-        "interview_type": interview_type,
-        "description": description,
-        "max_questions": max_questions,
-        "questions": questions,
-    }
-
-
-    save_questions_to_file(OUTPUT_FILE, [all_questions_entry], overwrite=overwrite)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    """
-
-
-
-
-
-
-
-    return gr.update(value=
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+import gradio as gr
+import json
+import time
+import os
+
+from generator import PROFESSIONS_FILE, TYPES_FILE, OUTPUT_FILE
+from generator import generate_questions, load_json_data, save_questions_to_file
+from splitgpt import save_questions
+# Load professions and interview types from JSON files
+try:
+    professions_data = load_json_data(PROFESSIONS_FILE)
+    types_data = load_json_data(TYPES_FILE)
+except (FileNotFoundError, json.JSONDecodeError) as e:
+    print(f"Error loading data from JSON files: {e}")
+    professions_data = []
+    types_data = []
+
+# Extract profession names and interview types for the dropdown menus
+profession_names = [item["profession"] for item in professions_data]
+interview_types = [item["type"] for item in types_data]
+
+# Define path for the questions.json file
+QUESTIONS_FILE = "questions.json"
+
+
+def generate_and_save_questions(profession, interview_type, num_questions, overwrite=True, progress=gr.Progress()):
+    """
+    Generates questions using the generate_questions function and saves them to JSON files.
+    Provides progress updates.
+    """
+    profession_info = next(
+        (item for item in professions_data if item["profession"] == profession), None
+    )
+    interview_type_info = next(
+        (item for item in types_data if item["type"] == interview_type), None
+    )
+
+    if profession_info is None or interview_type_info is None:
+        return "Error: Invalid profession or interview type selected.", None
+
+    description = profession_info["description"]
+    max_questions = min(int(num_questions), 20)  # Ensure max is 20
+
+    progress(0, desc="Starting question generation...")
+
+    questions = generate_questions(
+        profession, interview_type, description, max_questions
+    )
+
+    progress(0.5, desc=f"Generated {len(questions)} questions. Saving...")
+
+    # Save the generated questions to the all_questions.json file
+
+    all_questions_entry = {
+        "profession": profession,
+        "interview_type": interview_type,
+        "description": description,
+        "max_questions": max_questions,
+        "questions": questions,
+    }
+
+
+    save_questions_to_file(OUTPUT_FILE, [all_questions_entry], overwrite=overwrite)
+
+    save_questions(questions)
+
+    # Save the generated questions to the new questions.json file
+    with open(QUESTIONS_FILE, "w") as outfile:
+        json.dump(questions, outfile, indent=4)
+
+    progress(1, desc="Questions saved.")
+
+    return (
+        f"Questions generated and saved for {profession} ({interview_type}). Max questions: {max_questions}",
+        questions,
+    )
+
+
+
+def update_max_questions(interview_type):
+    """
+    Updates the default value of the number input based on the selected interview type.
+    """
+    interview_type_info = next(
+        (item for item in types_data if item["type"] == interview_type), None
+    )
+    if interview_type_info:
+        default_max_questions = interview_type_info.get("max_questions", 5)
+        return gr.update(value=default_max_questions, minimum=1, maximum=20)
+    else:
+        return gr.update(value=5, minimum=1, maximum=20)
+
+'''
+with gr.Blocks() as demo:
+    gr.Markdown("## Interview Question Generator for IBM CIC")
+    with gr.Row():
+        profession_input = gr.Dropdown(label="Select Profession", choices=profession_names)
+        interview_type_input = gr.Dropdown(label="Select Interview Type", choices=interview_types)
+
+    num_questions_input = gr.Number(
+        label="Number of Questions (1-20)", value=5, precision=0, minimum=1, maximum=20
+    )
+
+    generate_button = gr.Button("Generate Questions")
+
+    output_text = gr.Textbox(label="Output")
+    question_output = gr.JSON(label="Generated Questions")
+
+    # Update num_questions_input when interview_type_input changes
+    interview_type_input.change(
+        fn=update_max_questions,
+        inputs=interview_type_input,
+        outputs=num_questions_input,
+    )
+
+    generate_button.click(
+        generate_and_save_questions,
+        inputs=[profession_input, interview_type_input, num_questions_input],
+        outputs=[output_text, question_output],
+    )
+
+if __name__ == "__main__":
+    demo.queue().launch()
+
+'''
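
In the uploaded version the Gradio Blocks demo is wrapped in a ''' ... ''' string, so importing or running this module no longer launches a UI. A minimal sketch of driving the generator headlessly instead, assuming generator.py, splitgpt.py, and the data files behind PROFESSIONS_FILE / TYPES_FILE sit next to this module; the lambda is a stand-in for the default gr.Progress(), so no Gradio event context is needed:

# headless_run.py -- hypothetical usage sketch, not part of this commit
from generatorgr import generate_and_save_questions, profession_names, interview_types

if profession_names and interview_types:
    # Pick the first profession/type loaded at import time and generate 5 questions.
    status, questions = generate_and_save_questions(
        profession_names[0],
        interview_types[0],
        num_questions=5,
        overwrite=True,
        # Replaces gr.Progress(); just prints each progress description.
        progress=lambda value, desc=None: print(desc if desc else value),
    )
    print(status)
else:
    print("No profession/type data loaded; check the JSON data files.")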