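"""Streamlit app for analyzing intervention program data.

The app reads an uploaded Excel file, computes intervention session statistics
and per-student metrics, renders charts and per-student decision-tree diagrams,
and generates notes and recommendations with a Hugging Face LLM.

Run with: streamlit run <path-to-this-file>
"""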
import streamlit as st
import pandas as pd
from app_config import AppConfig  # Import the configuration class
from data_processor import DataProcessor  # Import the data processing class
from visualization import Visualization  # Import the data visualization class
from ai_analysis import AIAnalysis  # Import the AI analysis class
from sidebar import Sidebar  # Import the Sidebar class


def main():
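    """Run the Streamlit app: collect user input, process the uploaded Excel
    file, render statistics and charts, and generate the AI analysis."""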
    # Initialize the app configuration
    app_config = AppConfig()

    # Initialize the sidebar
    sidebar = Sidebar()
    sidebar.display()

    # Initialize the data processor
    data_processor = DataProcessor()

    # Initialize the visualization handler
    visualization = Visualization()

    # Initialize the AI analysis handler
    ai_analysis = AIAnalysis(data_processor.client)

    st.title("Intervention Program Analysis")

    # Date selection option
    date_option = st.radio(
        "Select data range:",
        ("All Data", "Date Range")
    )

    # Initialize start and end date variables
    start_date = None
    end_date = None

    if date_option == "Date Range":
        # Prompt user to enter start and end dates
        start_date = st.date_input("Start Date")
        end_date = st.date_input("End Date")

        # Ensure start date is before end date
        if start_date > end_date:
            st.error("Start date must be before end date.")
            return

    # File uploader
    uploaded_file = st.file_uploader("Upload your Excel file", type=["xlsx"])

    if uploaded_file is not None:
        try:
            # Read the Excel file into a DataFrame
            df = data_processor.read_excel(uploaded_file)

            # Format the session data
            df = data_processor.format_session_data(df)

            # Replace student names with initials
            df = data_processor.replace_student_names_with_initials(df)

            # Filter data if date range is selected
            if date_option == "Date Range":
                # Convert start_date and end_date to datetime.date objects
                start_date = pd.to_datetime(start_date).date()
                end_date = pd.to_datetime(end_date).date()

                # Filter the DataFrame to the selected date range (inclusive)
                df = df[(df['Date of Session'] >= start_date) & (df['Date of Session'] <= end_date)]
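                # Note: the comparison above assumes format_session_data() stores
                # 'Date of Session' as datetime.date values; if the column holds
                # pandas Timestamps instead, compare against pd.to_datetime(start_date)
                # and pd.to_datetime(end_date).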
            st.subheader("Uploaded Data")
            st.write(df)

            # Ensure expected column is available
            if DataProcessor.INTERVENTION_COLUMN not in df.columns:
                st.error(f"Expected column '{DataProcessor.INTERVENTION_COLUMN}' not found.")
                return

            # Compute Intervention Session Statistics
            intervention_stats = data_processor.compute_intervention_statistics(df)
            st.subheader("Intervention Session Statistics")
            st.write(intervention_stats)
            # Plot and download intervention statistics:
            # two-column layout for the chart and the intervention frequency value
            col1, col2 = st.columns([3, 1])  # Set the column width ratio
            with col1:
                intervention_fig = visualization.plot_intervention_statistics(intervention_stats)
            with col2:
                intervention_frequency = intervention_stats['Intervention Frequency (%)'].values[0]
                # Display the "Intervention Frequency" heading
                st.markdown("<h3 style='color: #358E66;'>Intervention Frequency</h3>", unsafe_allow_html=True)
                # Display the frequency value below it
                st.markdown(f"<h1 style='color: #358E66;'>{intervention_frequency}%</h1>", unsafe_allow_html=True)

            visualization.download_chart(intervention_fig, "intervention_statistics_chart.png")
            # Compute Student Metrics
            student_metrics_df = data_processor.compute_student_metrics(df)
            st.subheader("Student Metrics")
            st.write(student_metrics_df)

            # Compute Student Metric Averages
            attendance_avg_stats, engagement_avg_stats = data_processor.compute_average_metrics(student_metrics_df)

            # Plot and download student metrics
            student_metrics_fig = visualization.plot_student_metrics(student_metrics_df, attendance_avg_stats, engagement_avg_stats)
            visualization.download_chart(student_metrics_fig, "student_metrics_chart.png")

            # Evaluate each student
            student_metrics_df['Evaluation'] = student_metrics_df.apply(
                lambda row: data_processor.evaluate_student(row), axis=1
            )
            st.subheader("Student Evaluations")
            st.write(student_metrics_df[['Student', 'Evaluation']])
            # Build and display a decision tree diagram for each student
            for index, row in student_metrics_df.iterrows():
                tree_diagram = visualization.build_tree_diagram(row)

                # Get the student's name from the DataFrame
                student_name = row['Student']

                # Use st.expander to wrap the graphviz chart with the student's name
                with st.expander(f"{student_name} Decision Tree", expanded=False):
                    st.graphviz_chart(tree_diagram.source)
            # Prepare input for the language model
            llm_input = ai_analysis.prepare_llm_input(student_metrics_df)

            # Generate Notes and Recommendations using Hugging Face LLM
            with st.spinner("Generating AI analysis..."):
                recommendations = ai_analysis.prompt_response_from_hf_llm(llm_input)

            st.subheader("AI Analysis")
            st.markdown(recommendations)

            # Download AI output
            ai_analysis.download_llm_output(recommendations, "llm_output.txt")

        except Exception as e:
            st.error(f"Error processing the file: {str(e)}")

if __name__ == '__main__':
    main()