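"""Streamlit entry point for the Intervention Program Analysis app.

Reads an uploaded Excel file of intervention session data, computes
intervention and student metrics, renders charts and per-student decision
trees, and generates notes and recommendations with a Hugging Face LLM.
"""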
import streamlit as st
import pandas as pd
from app_config import AppConfig  # Import the configuration class
from data_processor import DataProcessor  # Import the data processing class
from visualization import Visualization  # Import the visualization class
from ai_analysis import AIAnalysis  # Import the AI analysis class
from sidebar import Sidebar  # Import the Sidebar class
def main():
    # Initialize the app configuration
    app_config = AppConfig()

    # Initialize the sidebar
    sidebar = Sidebar()
    sidebar.display()

    # Initialize the data processor
    data_processor = DataProcessor()

    # Initialize the visualization handler
    visualization = Visualization()

    # Initialize the AI analysis handler
    ai_analysis = AIAnalysis(data_processor.client)

    st.title("Intervention Program Analysis")
    # Date selection option
    date_option = st.radio(
        "Select data range:",
        ("All Data", "Date Range")
    )

    # Initialize start and end date variables
    start_date = None
    end_date = None

    if date_option == "Date Range":
        # Prompt the user to enter start and end dates
        start_date = st.date_input("Start Date")
        end_date = st.date_input("End Date")

        # Ensure the start date is not after the end date
        if start_date > end_date:
            st.error("Start date must be before end date.")
            return

    # File uploader
    uploaded_file = st.file_uploader("Upload your Excel file", type=["xlsx"])
    if uploaded_file is not None:
        try:
            # Read the Excel file into a DataFrame
            df = data_processor.read_excel(uploaded_file)

            # Format the session data
            df = data_processor.format_session_data(df)

            # Replace student names with initials
            df = data_processor.replace_student_names_with_initials(df)
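            # Note: the date filter below assumes 'Date of Session' holds datetime.date
            # values (presumably produced by format_session_data). If the column were a
            # pandas datetime64 dtype instead, align the types first, for example:
            #   df['Date of Session'] = pd.to_datetime(df['Date of Session']).dt.date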
            # Filter data if a date range is selected
            if date_option == "Date Range":
                # Convert start_date and end_date to datetime.date objects
                start_date = pd.to_datetime(start_date).date()
                end_date = pd.to_datetime(end_date).date()

                # Filter the DataFrame based on the selected date range
                df = df[(df['Date of Session'] >= start_date) & (df['Date of Session'] <= end_date)]

            st.subheader("Uploaded Data")
            st.write(df)

            # Ensure the expected column is available
            if DataProcessor.INTERVENTION_COLUMN not in df.columns:
                st.error(f"Expected column '{DataProcessor.INTERVENTION_COLUMN}' not found.")
                return
            # Compute Intervention Session Statistics
            intervention_stats = data_processor.compute_intervention_statistics(df)
            st.subheader("Intervention Session Statistics")
            st.write(intervention_stats)
            # Plot and download intervention statistics: two-column layout for the chart and the intervention frequency
            col1, col2 = st.columns([3, 1])  # Set the column width ratio
            with col1:
                intervention_fig = visualization.plot_intervention_statistics(intervention_stats)
            with col2:
                intervention_frequency = intervention_stats['Intervention Frequency (%)'].values[0]
                # Display the "Intervention Frequency" heading
                st.markdown("<h3 style='color: #358E66;'>Intervention Frequency</h3>", unsafe_allow_html=True)
                # Display the frequency value below it
                st.markdown(f"<h1 style='color: #358E66;'>{intervention_frequency}%</h1>", unsafe_allow_html=True)

            visualization.download_chart(intervention_fig, "intervention_statistics_chart.png")
            # Compute Student Metrics
            student_metrics_df = data_processor.compute_student_metrics(df)
            st.subheader("Student Metrics")
            st.write(student_metrics_df)

            # Compute Student Metric Averages
            attendance_avg_stats, engagement_avg_stats = data_processor.compute_average_metrics(student_metrics_df)

            # Plot and download student metrics
            student_metrics_fig = visualization.plot_student_metrics(student_metrics_df, attendance_avg_stats, engagement_avg_stats)
            visualization.download_chart(student_metrics_fig, "student_metrics_chart.png")

            # Evaluate each student
            student_metrics_df['Evaluation'] = student_metrics_df.apply(
                lambda row: data_processor.evaluate_student(row), axis=1
            )
            st.subheader("Student Evaluations")
            st.write(student_metrics_df[['Student', 'Evaluation']])
            # Build and display a decision tree diagram for each student
            for _, row in student_metrics_df.iterrows():
                tree_diagram = visualization.build_tree_diagram(row)

                # Get the student's name from the DataFrame row
                student_name = row['Student']

                # Use st.expander to wrap the graphviz chart with the student's name
                with st.expander(f"{student_name} Decision Tree", expanded=False):
                    st.graphviz_chart(tree_diagram.source)
            # Prepare input for the language model
            llm_input = ai_analysis.prepare_llm_input(student_metrics_df)

            # Generate notes and recommendations using the Hugging Face LLM
            with st.spinner("Generating AI analysis..."):
                recommendations = ai_analysis.prompt_response_from_hf_llm(llm_input)

            st.subheader("AI Analysis")
            st.markdown(recommendations)

            # Download the AI output
            ai_analysis.download_llm_output(recommendations, "llm_output.txt")

        except Exception as e:
            st.error(f"Error processing the file: {str(e)}")

if __name__ == '__main__':
    main()