import streamlit as st
import os
import google.generativeai as genai
from huggingface_hub import hf_hub_download
import base64
from PIL import Image
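
# Streamlit VQA app for the "Generative AI and Education" book.
# The book PDF is downloaded from the Hugging Face Hub and attached to Gemini prompts,
# so answers are grounded in the source document.
# Reads the GEMINI_API_KEY and HF_TOKEN environment variables.
# To run locally (assuming this file is saved as app.py): streamlit run app.py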
MODEL_ID = "gemini-2.0-flash-exp" # Keep the model ID as is
try:
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise ValueError("GEMINI_API_KEY environment variable is not set.")
    genai.configure(api_key=api_key)
except Exception as e:
    st.error(f"Error: {e}")
    st.stop()
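
# Create the Gemini model and start the chat session used for the Q&A turns below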
model = genai.GenerativeModel(MODEL_ID)
chat = model.start_chat()
def download_pdf():
    """
    Downloads the PDF file from the Hugging Face Hub using the correct repo path and filename.
    """
    try:
        hf_token = os.getenv("HF_TOKEN")
        repo_id = "wvsuaidev/gen_ai_and_education_dataset"  # Dataset repo that hosts the book PDF
        filename = "Generative_AI_and_Education.pdf"
        filepath = hf_hub_download(repo_id=repo_id, filename=filename, token=hf_token, repo_type="dataset")
        return filepath
    except Exception as e:
        st.error(f"Failed to download PDF from Hugging Face Hub: {e}")
        st.stop()  # Stop if the download fails
# Initialize conversation history in Streamlit session state
if "conversation_history" not in st.session_state:
    st.session_state.conversation_history = []
if "uploaded_file_part" not in st.session_state:  # Store the uploaded file *part*
    st.session_state.uploaded_file_part = None
if "uploaded_pdf_path" not in st.session_state:
    st.session_state.uploaded_pdf_path = download_pdf()
def multimodal_prompt(pdf_path, text_prompt):
    """
    Sends a multimodal prompt to Gemini, handling file uploads efficiently.

    Args:
        pdf_path: The path to the PDF file.
        text_prompt: The text prompt for the model.

    Returns:
        The model's response as a string, or an error message.
    """
    try:
        if st.session_state.uploaded_file_part is None:  # First turn: upload the actual file
            pdf_part = genai.upload_file(pdf_path, mime_type="application/pdf")
            st.session_state.uploaded_file_part = pdf_part
            prompt = [text_prompt, pdf_part]
        else:  # Subsequent turns: reuse the uploaded file reference
            prompt = [text_prompt, st.session_state.uploaded_file_part]

        response = chat.send_message(prompt)

        # Update conversation history
        st.session_state.conversation_history.append({"role": "user", "content": text_prompt, "has_pdf": True})
        st.session_state.conversation_history.append({"role": "assistant", "content": response.text})

        return response.text
    except Exception as e:
        return f"An error occurred: {e}"
def display_download_button(file_path, file_name):
    """Renders a download link for the source PDF as a base64 data URI."""
    try:
        with open(file_path, "rb") as f:
            file_bytes = f.read()
        b64 = base64.b64encode(file_bytes).decode()
        href = f'<a href="data:application/pdf;base64,{b64}" download="{file_name}">Download the source document (PDF)</a>'
        st.markdown(href, unsafe_allow_html=True)
    except FileNotFoundError:
        st.error("File not found for download.")
    except Exception as e:
        st.error(f"Error during download: {e}")
# --- Main Page ---
st.title("📚 VQA on the Generative AI and Education Book")

about = """
**How to use this App**

This app uses Gemini 2.0 to answer questions about the book *Generative AI and Education*.
Select a question from the dropdown menu, or enter your own, and Gemini will generate a
response grounded in the source document.
"""

with st.expander("How to use this App"):
    st.markdown(about)

# Load and display the book cover image
image = Image.open("genai_educ.png")
st.image(image, width=400)
# --- Q and A Tab ---
st.header("Questions and Answers")
# Preset questions about the book
questions = [
    "How does the history of AI inform its current applications in education?",
    "What distinguishes generative AI from earlier AI forms?",
    "How has the web's evolution influenced AI in education?",
    "How does 'creative destruction' apply to AI in education?",
    "How can institutions navigate the AI ecosystem for informed decisions?",
    "What are the ethical concerns and risks of AI tools in education?",
    "How can educators leverage AI tools while maintaining human-centered pedagogy?",
    "What is the role of AI literacy in education?",
    "What are the key features of the 'new hybrid' model in education?",
    "How can educators develop skills for effective collaboration with AI?",
    "What are the implications of the new hybrid model for educator roles?",
    "How can the new hybrid model be used to create personalized learning experiences?",
    "How can generative learning theory be applied to AI-enabled learning design?",
    "What are the challenges in designing authentic assessments in the age of AI?",
    "How can constructive alignment be adapted for AI-enabled learning?",
    "How can Bloom's Taxonomy be reinterpreted for human-computer collaboration?",
    "How can AI foster community in learning environments?",
    "What are the benefits and challenges of using AI for peer learning?",
    "How can the Community of Inquiry framework be adapted for AI-enabled communities?",
    "What are the key considerations for institutions embedding AI?"
]
# Create a selection box
selected_question = st.selectbox("Choose a question", questions)
# Display a checkbox
if st.checkbox('Check this box to ask a question not listed above'):
    # If the checkbox is checked, display a text box for a custom question
    selected_question = st.text_input('Enter a question')
if st.button("Ask AI"):
    with st.spinner("AI is thinking..."):
        if st.session_state.uploaded_pdf_path is None:
            st.session_state.uploaded_pdf_path = download_pdf()

        filepath = st.session_state.uploaded_pdf_path
        text_prompt = f"Use the provided document to answer the following question: {selected_question}. Always cite the relevant sections of the document."
        response = multimodal_prompt(filepath, text_prompt)  # Use the downloaded filepath
        st.markdown(f"**Response:** {response}")
if st.session_state.uploaded_pdf_path:
    display_download_button(st.session_state.uploaded_pdf_path, "Generative_AI_and_Education.pdf")
st.markdown("[Visit our Hugging Face Space!](https://huggingface.co/wvsuaidev)")
st.markdown("© 2025 WVSU AI Dev Team 🤖 ✨")