Spaces:
Sleeping
Sleeping
msg
Browse files- Dockerfile +16 -0
- main.py +159 -0
- requirements.txt +5 -0
Dockerfile
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

FROM python:3.9

# Run as a non-root user; uid 1000 is the Hugging Face Spaces convention
RUN useradd -m -u 1000 user
USER user
# Make user-level pip installs (~/.local/bin) resolvable, e.g. uvicorn
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

# Copy and install requirements first so the pip layer is cached and only
# re-runs when requirements.txt changes, not on every code change
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY --chown=user . /app
# Spaces routes traffic to port 7860; bind on all interfaces
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
main.py
ADDED
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Module-level setup: load configuration, connect to Supabase, and fetch and
# clean the HR dataset ONCE at import time.
# NOTE(review): `data` is a one-shot snapshot taken when the process starts —
# the API must be restarted to see new rows; confirm this is intended.
from fastapi import FastAPI, HTTPException,Query
import pandas as pd
from supabase import create_client, Client
import os
from dotenv import load_dotenv

# Load environment variables from a local .env file (no-op if absent)
load_dotenv()

# Read Supabase credentials
# NOTE(review): no validation here — if either variable is unset,
# create_client below receives None and fails; consider failing fast
# with a clearer error message.
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")

# Initialize FastAPI and Supabase
app = FastAPI()
supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)

# Fetch data from Supabase. On any failure fall back to an empty DataFrame
# so the app still starts; endpoints then return empty results or 500s.
try:
    response = supabase.table("HR analysis").select("*").execute()
    data = pd.DataFrame(response.data) if response.data else pd.DataFrame()
except Exception as e:
    print(f"Error fetching data: {e}")
    data = pd.DataFrame()

# Convert date columns; unparseable values become NaT (errors='coerce')
for col in ['Survey Date', 'StartDate', 'DOB']:
    if col in data.columns:
        data[col] = pd.to_datetime(data[col], errors='coerce')

# Calculate Age — approximate integer years (days // 365, ignores leap years)
if 'DOB' in data.columns:
    data['Age'] = (pd.to_datetime("today") - data['DOB']).dt.days // 365

# Clean Performance Score: map textual ratings onto a 2–5 numeric scale.
# Labels not in score_map become None, then NaN after the numeric coercion.
score_map = {"Exceeds": 5, "Fully Meets": 4, "Needs Improvement": 3, "PIP": 2}
if 'Performance Score' in data.columns:
    data['Performance Score'] = data['Performance Score'].map(lambda x: score_map.get(str(x).strip(), None))
    data['Performance Score'] = pd.to_numeric(data['Performance Score'], errors='coerce')

# Endpoints with try-except handling
43 |
+
@app.get("/satisfaction-analysis")
def satisfaction_analysis(department: str = Query(None, description="Filter by department")):
    """Mean Satisfaction Score per department, optionally filtered.

    Query params:
        department: optional department name; matching is whitespace- and
            case-insensitive (both sides are stripped and title-cased).

    Returns a list of {"DepartmentType": ..., "Satisfaction Score": mean}
    records, or [] when no rows match. Raises HTTP 500 when the required
    columns are missing or an unexpected error occurs.
    """
    try:
        if "DepartmentType" not in data.columns or "Satisfaction Score" not in data.columns:
            raise HTTPException(status_code=500, detail="Required columns missing in dataset")

        filtered_data = data.copy()

        if department:
            department = department.strip().title()  # Normalize input
            filtered_data = filtered_data[
                filtered_data["DepartmentType"].str.strip().str.title() == department
            ]

        if filtered_data.empty:
            return []  # Return empty JSON instead of error

        result = filtered_data.groupby("DepartmentType")["Satisfaction Score"].mean().reset_index()
        return result.to_dict(orient="records")
    except HTTPException:
        # Bug fix: HTTPException is an Exception subclass, so the generic
        # handler below used to catch and re-wrap our own 500 via str(),
        # mangling the detail ("500: Required columns ..."). Re-raise as-is.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
64 |
+
|
65 |
+
|
66 |
+
|
67 |
+
@app.get("/department-performance")
def department_performance():
    """Mean Performance Score and Current Employee Rating per department.

    Returns a list of records, one per DepartmentType; any failure
    (e.g. missing columns) is surfaced as HTTP 500.
    """
    try:
        metric_columns = ["Performance Score", "Current Employee Rating"]
        averages = (
            data.groupby("DepartmentType")[metric_columns]
            .mean()
            .reset_index()
        )
        return averages.to_dict(orient="records")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
74 |
+
|
75 |
+
@app.get("/training-analytics")
def training_analytics(program_name: str = Query(None, description="Filter by training program name")):
    """Per-program distribution of Training Outcome values (as fractions).

    Query params:
        program_name: optional exact Training Program Name to filter on.

    Returns one record per program with a column per outcome, or [] when
    nothing matches. Unexpected errors are surfaced as HTTP 500.
    """
    try:
        if program_name is None:
            subset = data
        else:
            subset = data[data["Training Program Name"] == program_name]

        if subset.empty:
            return []

        outcome_shares = (
            subset.groupby("Training Program Name")["Training Outcome"]
            .value_counts(normalize=True)
            .unstack(fill_value=0)
        )
        return outcome_shares.reset_index().to_dict(orient="records")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
87 |
+
|
88 |
+
|
89 |
+
|
90 |
+
@app.get("/engagement-performance")
def engagement_performance():
    """Pearson correlation between Engagement Score and Performance Score.

    Returns {"correlation_coefficient": float | None}; None when the
    correlation is undefined (empty data or a constant column).
    """
    try:
        correlation = data[['Engagement Score', 'Performance Score']].corr().iloc[0, 1]
        # Bug fix: corr() yields NaN for empty/constant columns, and NaN is
        # not representable in strict JSON — return null instead. float()
        # also converts the numpy scalar to a plain Python float.
        if pd.isna(correlation):
            return {"correlation_coefficient": None}
        return {"correlation_coefficient": float(correlation)}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
97 |
+
|
98 |
+
@app.get("/cost-benefit-analysis")
def cost_benefit_analysis():
    """Rough training ROI per department.

    ROI = mean Performance Score / total Training Cost for each
    DepartmentType. Departments whose ratio is undefined (zero total cost
    -> inf, or all-NaN scores -> NaN) get ROI null, since neither inf nor
    NaN is valid JSON.
    """
    try:
        result = data.groupby("DepartmentType").apply(
            lambda x: x['Performance Score'].mean() / x['Training Cost'].sum()
        ).reset_index(name="ROI")
        # Bug fix: division by a zero cost sum yields inf and empty/NaN
        # scores yield NaN — both break JSON encoding. Map them to None.
        result["ROI"] = result["ROI"].map(
            lambda v: None if pd.isna(v) or v == float("inf") or v == float("-inf") else float(v)
        )
        return result.to_dict(orient="records")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
105 |
+
|
106 |
+
@app.get("/training-effectiveness")
def training_effectiveness():
    """Average Performance Score for each training program.

    Returns one record per Training Program Name; failures (e.g. missing
    columns) are surfaced as HTTP 500.
    """
    try:
        per_program = (
            data.groupby("Training Program Name")["Performance Score"]
            .mean()
            .reset_index()
        )
        return per_program.to_dict(orient="records")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
113 |
+
|
114 |
+
@app.get("/diversity-inclusion")
def diversity_dashboard():
    """Gender distribution per department.

    Returns one record per DepartmentType with a column per GenderCode
    holding that gender's fraction of the department. Raises HTTP 500 when
    the required columns are missing.
    """
    try:
        if "DepartmentType" not in data.columns or "GenderCode" not in data.columns:
            raise HTTPException(status_code=500, detail="Required columns missing in dataset")

        # Compute gender distribution by department (normalized fractions)
        diversity_metrics = data.groupby("DepartmentType")["GenderCode"].value_counts(normalize=True).unstack(fill_value=0).reset_index()

        return diversity_metrics.to_dict(orient="records")
    except HTTPException:
        # Bug fix: re-raise our own HTTPException untouched instead of
        # letting the generic handler below re-wrap it via str(), which
        # mangled the detail message.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
126 |
+
|
127 |
+
|
128 |
+
@app.get("/work-life-balance")
def worklife_balance_impact():
    """Correlation between Work-Life Balance Score and Performance Score.

    Returns {"correlation_coefficient": float | None}, rounded to 3 places;
    None when the correlation is undefined (empty data or constant column).
    """
    try:
        correlation = data[['Work-Life Balance Score', 'Performance Score']].corr().iloc[0, 1]
        # Bug fix: round(NaN) is still NaN, which is not valid JSON — return
        # null when the correlation is undefined.
        if pd.isna(correlation):
            return {"correlation_coefficient": None}
        return {"correlation_coefficient": round(float(correlation), 3)}  # Return as a JSON object
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
135 |
+
|
136 |
+
|
137 |
+
@app.get("/career-development")
def career_development(employee_id: str = Query(None, description="Filter by Employee ID")):
    """Count of recorded career movements (StartDate rows) per employee.

    Query params:
        employee_id: optional Employee ID (compared as a stripped string).

    Returns a list of {"Employee ID": ..., "Career Movements": count}
    records, or [] when no rows match. Raises HTTP 500 when the required
    columns are missing.
    """
    try:
        if "Employee ID" not in data.columns or "StartDate" not in data.columns:
            raise HTTPException(status_code=500, detail="Required columns missing in dataset")

        # (removed: per-request debug print of every Employee ID — it leaked
        # the full ID list to logs and slowed each call)
        filtered_data = data.copy()

        if employee_id:
            employee_id = employee_id.strip()  # tolerate surrounding whitespace
            filtered_data = filtered_data[filtered_data["Employee ID"].astype(str) == employee_id]

        if filtered_data.empty:
            return []  # Return an empty list if no matching records

        career_progress = filtered_data.groupby("Employee ID")["StartDate"].count().reset_index(name="Career Movements")
        return career_progress.to_dict(orient="records")
    except HTTPException:
        # Bug fix: re-raise our own HTTPException untouched instead of
        # re-wrapping it via str() in the generic handler below.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
159 |
+
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi
|
2 |
+
uvicorn
|
3 |
+
pandas
|
4 |
+
supabase
|
5 |
+
python-dotenv
|