import streamlit as st
import openai
import os
from openai import OpenAI
import re

# Initialize the session state for the selection box
if "selected_option" not in st.session_state:
    st.session_state["selected_option"] = None  # Default value
if "selected_task" not in st.session_state:
    st.session_state["selected_task"] = None  # Default value

api_key = os.getenv("NVIDIA_API_KEY")

# Check if the API key is found
if api_key is None:
    st.error("NVIDIA_API_KEY environment variable not found.")
else:
    # Initialize the OpenAI client pointed at the NVIDIA API endpoint
    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=api_key
    )


def generate_prompt(topic, difficulty, num_questions):
    """Generates an AI prompt based on user input."""
    prompt = (
        f"Generate {num_questions} quiz questions on the topic '{topic}' "
        f"with a difficulty level of '{difficulty}'."
    )
    return prompt


def generate_ai_response(prompt, enablestreaming):
    """Generates a response from an AI model.

    Args:
        prompt: The prompt to send to the AI model.
        enablestreaming: Whether the model response should be streamed.

    Returns:
        The response from the AI model.
    """
    try:
        completion = client.chat.completions.create(
            model="meta/llama-3.3-70b-instruct",
            temperature=0.5,  # Adjust temperature for creativity
            top_p=1,
            max_tokens=1024,
            messages=[
                {
                    "role": "system",
                    "content": "You are an AI assistant designed to generate educational \
                        questions that foster higher-order thinking skills in line \
                        with outcomes-based education. For each question, focus on \
                        evaluating skills such as analysis, synthesis, application, \
                        and evaluation rather than simple recall. Create multiple-choice \
                        questions with four answer options, clearly indicating the \
                        correct answer. Your output should strictly follow this \
                        JSON format:\n\n{\n \"question\": \"\",\n \"options\": [\n \"