louiecerv committed on
Commit
dc07835
·
1 Parent(s): a2f515c
Files changed (2) hide show
  1. app.py +99 -15
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,4 +1,27 @@
1
  import streamlit as st
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
  def generate_prompt(topic, difficulty, num_questions):
4
  """Generates an AI prompt based on user input."""
@@ -8,13 +31,72 @@ def generate_prompt(topic, difficulty, num_questions):
8
  )
9
  return prompt
10
 
11
- def get_ai_response(prompt):
12
- """Dummy function to simulate getting a response from an AI model."""
13
- response = f"Here are your generated questions based on the prompt: {prompt}"
14
- return response
 
15
 
16
- # Streamlit App
17
- st.title("Nemotron Quizmaster")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
  with st.expander("About"):
20
  st.write(
@@ -32,18 +114,20 @@ difficulty = st.selectbox("Select difficulty level:", difficulty_levels)
32
  num_questions = st.selectbox("Select the number of questions:", [5, 10, 15])
33
 
34
  # Generate AI prompt
35
- if st.button("Generate AI Prompt"):
36
- if topic.strip():
37
- ai_prompt = generate_prompt(topic, difficulty, num_questions)
38
- st.write(f"Generated Prompt: {ai_prompt}")
39
- else:
40
- st.warning("Please enter a topic before generating a prompt.")
 
41
 
42
  # Get AI response
43
  if st.button("Get Response"):
44
  if topic.strip():
45
- ai_prompt = generate_prompt(topic, difficulty, num_questions)
46
- ai_response = get_ai_response(ai_prompt)
47
- st.write(ai_response)
 
48
  else:
49
  st.warning("Please generate a prompt first before getting a response.")
 
1
import streamlit as st
import openai
import os
from openai import OpenAI
import re

# Session-state defaults so widget selections survive Streamlit reruns.
if "selected_option" not in st.session_state:
    st.session_state["selected_option"] = None  # Default value

if "selected_task" not in st.session_state:
    st.session_state["selected_task"] = None  # Default value

# The NVIDIA NIM endpoint is OpenAI-compatible; authentication comes from
# the environment so the key never lands in source control.
api_key = os.getenv("NVIDIA_API_KEY")

# Check if the API key is found
if api_key is None:
    st.error("NVIDIA_API_KEY environment variable not found.")
    # Halt the script run here: without a key, `client` would never be
    # defined and every later model call would crash with NameError.
    st.stop()

# Initialize the OpenAI client against NVIDIA's hosted endpoint.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=api_key,
)
25
 
26
  def generate_prompt(topic, difficulty, num_questions):
27
  """Generates an AI prompt based on user input."""
 
31
  )
32
  return prompt
33
 
34
def generate_ai_response(prompt, enablestreaming):
    """Send *prompt* to the NVIDIA-hosted Llama model and return its reply.

    Args:
        prompt: The prompt to send to the AI model.
        enablestreaming: When True, stream tokens into the page as they
            arrive; when False, wait for the complete response.

    Returns:
        The model's response text, or None if the API call failed.
    """
    system_prompt = (
        "You are an AI assistant designed to generate educational "
        "questions that foster higher-order thinking skills in line "
        "with outcomes-based education. For each question, focus on "
        "evaluating skills such as analysis, synthesis, application, "
        "and evaluation rather than simple recall. Create multiple-choice "
        "questions with four answer options, clearly indicating the "
        "correct answer. Your output should strictly follow this "
        "JSON format:\n\n{\n \"question\": \"<Insert the question "
        "text here>\",\n \"options\": [\n \"<Option A>\",\n "
        "\"<Option B>\",\n \"<Option C>\",\n \"<Option D>\"\n ],\n "
        "\"correct_answer\": \"<Insert the correct option text here>\"\n}\n\n "
        "Ensure questions are designed to encourage critical thinking "
        "and align with measurable learning outcomes. The topic can "
        "range across various disciplines based on provided inputs "
        "or your general knowledge. For instance, if the topic is "
        "'AI and Machine Learning,' ensure the question engages "
        "learners in practical or theoretical applications of the "
        "subject. Ensure that every question is unique in a set of questions."
    )
    try:
        completion = client.chat.completions.create(
            model="meta/llama-3.3-70b-instruct",
            temperature=0.5,  # Adjust temperature for creativity
            top_p=1,
            max_tokens=1024,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt},
            ],
            stream=enablestreaming,
        )

        if enablestreaming:
            # Accumulate streamed deltas and re-render the growing text in
            # a single placeholder so the answer appears to type itself.
            response_container = st.empty()
            model_response = ""
            for chunk in completion:
                # NOTE: the previous `elif 'error' in chunk:` branch was a
                # membership test on a ChatCompletionChunk object, not a
                # dict, so it could never detect errors; API failures are
                # raised as exceptions and handled by the except below.
                delta = chunk.choices[0].delta.content
                if delta is not None:
                    model_response += delta
                    response_container.write(model_response)
        else:
            # Non-streaming path: the full reply is available at once.
            model_response = completion.choices[0].message.content

        return model_response
    except Exception as e:
        # Surface the failure in the UI; callers treat None as "no answer".
        st.error(f"An error occurred: {e}")
        return None
98
+
99
+ st.title("Quiz Question Generator")
100
 
101
  with st.expander("About"):
102
  st.write(
 
114
  num_questions = st.selectbox("Select the number of questions:", [5, 10, 15])
115
 
116
# Generate AI prompt
ai_prompt = ""

# The prompt is rebuilt on every rerun so it always reflects the current
# topic / difficulty / question-count selections.
if topic.strip():
    ai_prompt = generate_prompt(topic, difficulty, num_questions)
    st.write(f"Generated Prompt: {ai_prompt}")
else:
    st.warning("Please enter a topic before generating a prompt.")

# Get AI response
if st.button("Get Response"):
    if topic.strip():
        with st.spinner("Thinking..."):
            response = generate_ai_response(ai_prompt, enablestreaming=True)
        # generate_ai_response returns None on failure (and shows its own
        # error), so only report success when a response actually arrived.
        # The stray debug print(ai_prompt) was removed.
        if response is not None:
            st.success("Response generated successfully.")
    else:
        st.warning("Please generate a prompt first before getting a response.")
requirements.txt CHANGED
@@ -1,2 +1,2 @@
1
  streamlit
2
- transformers
 
1
  streamlit
2
+ openai