# NOTE: header below reconstructed from file-viewer residue (revision 2792fd5,
# 8,475 bytes); the original scrape left raw viewer chrome here, which is not
# valid Python.
import streamlit as st
import os
from dotenv import load_dotenv
import PyPDF2
from docx import Document
import io
from typing import Dict, Any, List
from pydantic import BaseModel, Field
import plotly.graph_objects as go
import json
import re
from docx.shared import Inches
from docx.enum.text import WD_ALIGN_PARAGRAPH
import plotly.io as pio
from mh_aspects import agent as aspects_agent
from mh_classification import agent as clarification_agent
from mh_evaluation import MHEvaluationAgent as mh_eval_agent
# Load environment variables
load_dotenv()
# Get model from environment
OPENAI_MODEL = os.getenv('OPENAI_MODEL', 'gpt-3.5-turbo')
# Initialize evaluation agent
# NOTE(review): this rebinds the imported *class* name `mh_eval_agent`
# (aliased from MHEvaluationAgent above) to an *instance* of itself; the rest
# of the file relies on the name holding the instance. Consider importing the
# class under its real name and giving the instance a distinct name.
mh_eval_agent = mh_eval_agent()
def test_api_connection():
    """Probe the OpenAI-backed aspects agent with a tiny job description.

    Returns:
        bool: True if the agent produced any response, False otherwise.
              Success/failure is also surfaced in the UI via st.success /
              st.error.
    """
    try:
        # Minimal fixture JD — content only needs to be plausible enough for
        # the agent to respond.
        test_jd = """Test Job Description
Position: Software Engineer
Requirements:
- 3+ years of Python experience
- Bachelor's degree in Computer Science
- Experience with web development
"""
        # One real round trip through the aspects agent proves the key/model work.
        response = aspects_agent.run(input=f"Analyze this job description and generate key must-have aspects only:\n\n{test_jd}")
        if response:
            # Fix: the success message was split across two lines by extraction
            # garbling; it is a single status string.
            st.success("β API connection successful!")
            return True
        st.error("β API connection failed: No response received")
        return False
    except Exception as e:
        st.error(f"β API connection failed: {str(e)}")
        return False
# Pydantic model for must-have requirements
class MustHaveAnalysis(BaseModel):
    """Structured verdict for must-have requirement matching.

    NOTE(review): not referenced anywhere in this file — presumably intended
    as the schema for the evaluation agent's output; confirm before removing.
    """
    # Category code: 1 = no must-haves mentioned, 2 = meets, 3 = does not meet
    category: str = Field(..., description="Category (1: No must-haves mentioned, 2: Meets Requirements, 3: Does Not Meet)")
    # Quotes/snippets backing the category decision
    evidence: List[str] = Field(default_factory=list, description="Evidence supporting the categorization")
    # Model confidence in [0, 1]; defaults to 0.8
    confidence: float = Field(default=0.8, description="Confidence score between 0 and 1")
# Configure the Streamlit page; must run before any other st.* UI call.
st.set_page_config(
    page_title="JD & Resume Analyzer",
    page_icon="π",
    layout="wide",
)

# Seed every session-state slot this app uses so later reads are safe
# across Streamlit reruns.
for _slot in ('analysis_result', 'aspects', 'clarifications'):
    if _slot not in st.session_state:
        st.session_state[_slot] = None
def create_gauge_chart(value, title):
    """Build a plotly gauge (0-100 scale) displaying *value* under *title*.

    Returns a go.Figure with a transparent background, a red threshold
    marker at 80, and gray banding at 33/66.
    """
    # Gauge configuration kept as a named dict for readability.
    gauge_config = {
        'axis': {'range': [0, 100]},
        'bar': {'color': "rgb(50, 168, 82)"},
        'steps': [
            {'range': [0, 33], 'color': "lightgray"},
            {'range': [33, 66], 'color': "gray"},
            {'range': [66, 100], 'color': "darkgray"},
        ],
        'threshold': {
            'line': {'color': "red", 'width': 4},
            'thickness': 0.75,
            'value': 80,
        },
    }
    indicator = go.Indicator(
        mode="gauge+number",
        value=value,
        domain={'x': [0, 1], 'y': [0, 1]},
        title={'text': title},
        gauge=gauge_config,
    )
    fig = go.Figure(indicator)
    # Compact layout with a transparent paper background so the chart blends
    # into the Streamlit page.
    fig.update_layout(
        height=250,
        margin=dict(l=10, r=10, t=50, b=10),
        paper_bgcolor="rgba(0,0,0,0)",
        font={'color': "#31333F"},
    )
    return fig
def extract_text_from_pdf(file):
    """Extract text from every page of a PDF file-like object.

    Returns the stripped concatenated text, or None on failure (the error
    is shown in the UI via st.error).
    """
    try:
        pdf_reader = PyPDF2.PdfReader(file)
        # Fix: extract_text() can return None (e.g. image-only pages), which
        # crashed the original `text += page.extract_text() + "\n"`; coerce
        # to "" and join once instead of quadratic +=.
        pages = [(page.extract_text() or "") for page in pdf_reader.pages]
        return "\n".join(pages).strip()
    except Exception as e:
        st.error(f"Error reading PDF: {str(e)}")
        return None
def extract_text_from_docx(file):
    """Extract paragraph text from a DOCX file-like object.

    Returns the stripped newline-joined paragraph text, or None on failure
    (the error is shown in the UI via st.error).
    """
    try:
        doc = Document(io.BytesIO(file.read()))
        # Join once instead of quadratic string += in a loop; trailing
        # newline is removed by strip() exactly as before.
        return "\n".join(paragraph.text for paragraph in doc.paragraphs).strip()
    except Exception as e:
        st.error(f"Error reading DOCX: {str(e)}")
        return None
def read_file_content(file):
    """Return the text content of an uploaded pdf/docx/txt file, or None.

    Args:
        file: a Streamlit UploadedFile (or any file-like with .name, .read,
              .seek), or None.

    Returns:
        Extracted text, or None when file is None or extraction fails
        (failures are reported via st.error).
    """
    if file is None:
        return None
    file_extension = file.name.split('.')[-1].lower()
    try:
        # Fix: main() calls this twice per upload (preview + analysis) on the
        # same UploadedFile object, so the second .read() returned b"" —
        # rewind the buffer before every read.
        file.seek(0)
        if file_extension == 'pdf':
            # Copy the bytes so the caller's buffer position is untouched.
            file_copy = io.BytesIO(file.read())
            file.seek(0)
            return extract_text_from_pdf(file_copy)
        elif file_extension == 'docx':
            return extract_text_from_docx(file)
        elif file_extension == 'txt':
            return file.read().decode('utf-8').strip()
        else:
            raise ValueError(f"Unsupported file type: {file_extension}")
    except Exception as e:
        st.error(f"Error reading file {file.name}: {str(e)}")
        return None
def analyze_must_haves(jd_text: str, resume_text: str) -> Dict:
    """Run the three-step must-have pipeline on a JD / resume pair.

    Steps: (1) aspects agent extracts must-have checkpoints from the JD,
    (2) clarification agent cross-checks them against the resume,
    (3) evaluation agent produces the final verdict.

    Returns a dict with keys 'aspects', 'clarifications' and 'evaluation',
    or None if any step raises (error surfaced via st.error). Intermediate
    results are stashed in st.session_state as a side effect.
    """
    try:
        # Step 1: must-have aspects from the JD.
        aspects = aspects_agent.run(input=f"Analyze this job description and generate key must-have aspects only:\n\n{jd_text}")
        st.session_state.aspects = aspects

        # Step 2: clarifications from the resume against those checkpoints.
        clarification_prompt = f"""Checkpoints:
{aspects}
Resume:
{resume_text}"""
        clarifications = clarification_agent.run(input=clarification_prompt)
        st.session_state.clarifications = clarifications

        # Step 3: final evaluation combining all prior artifacts.
        evaluation = mh_eval_agent.forward(
            job_description=jd_text,
            profile=resume_text,
            checkpoints=aspects,
            answer_script=clarifications,
        )

        return {
            'aspects': aspects,
            'clarifications': clarifications,
            'evaluation': evaluation,
        }
    except Exception as e:
        st.error(f"Error in analysis pipeline: {str(e)}")
        return None
def display_analysis_result(result: Dict):
    """Render a pipeline result: aspects, clarifications, then evaluation.

    Shows an error and returns early when *result* is falsy (failed run).
    """
    if not result:
        st.error("Analysis failed")
        return

    st.title("Must-Have Requirements Analysis")

    # Collapsible sections for the intermediate artifacts.
    with st.expander("π― Must-Have Requirements", expanded=True):
        st.write(result['aspects'])
    with st.expander("π Clarifications", expanded=True):
        st.write(result['clarifications'])

    # Final verdict, written as-is from the evaluation agent.
    st.header("π Final Evaluation")
    st.write(result['evaluation'])
def main():
    """Streamlit entry point: upload a JD and resume, run must-have analysis."""
    st.title("π JD & Resume Must-Have Requirements Analyzer")

    # Fix: Streamlit re-executes this script on every widget interaction, so
    # the original code made a live LLM call per rerun just to probe the API.
    # Cache a successful probe in session state; a failed probe is retried on
    # the next rerun (matching the original retry behavior).
    if not st.session_state.get('api_ok', False):
        st.session_state.api_ok = test_api_connection()
    if not st.session_state.api_ok:
        st.warning("β οΈ Please check your API key and model configuration in the .env file")
        return

    st.write("Upload a job description and resume to analyze if the candidate meets the must-have requirements.")
    # Show which model is configured (read once at import from .env).
    st.sidebar.info(f"Using model: {OPENAI_MODEL}")

    # Side-by-side uploaders, each with a text preview of the extracted content.
    col1, col2 = st.columns(2)
    with col1:
        jd_file = st.file_uploader("Upload Job Description (PDF, DOCX, or TXT)", type=['pdf', 'docx', 'txt'])
        if jd_file:
            st.text_area("Job Description Content", read_file_content(jd_file), height=300)
    with col2:
        resume_file = st.file_uploader("Upload Resume (PDF, DOCX, or TXT)", type=['pdf', 'docx', 'txt'])
        if resume_file:
            st.text_area("Resume Content", read_file_content(resume_file), height=300)

    if st.button("Analyze Must-Have Requirements"):
        if jd_file and resume_file:
            with st.spinner("Analyzing documents..."):
                try:
                    jd_text = read_file_content(jd_file)
                    resume_text = read_file_content(resume_file)
                    if jd_text and resume_text:
                        analysis = analyze_must_haves(jd_text, resume_text)
                        st.session_state.analysis_result = analysis
                        display_analysis_result(analysis)
                    else:
                        st.error("Failed to extract text from one or both files.")
                except Exception as e:
                    st.error(f"An error occurred: {str(e)}")
        else:
            st.warning("Please upload both a job description and resume.")

    # Re-show the previous result when no fresh file pair is staged.
    if st.session_state.analysis_result and not (jd_file and resume_file):
        display_analysis_result(st.session_state.analysis_result)
# Run the app when executed as a script.
# Fix: removed a stray " |" after main() — residue from the file-viewer
# scrape that made this line a syntax error.
if __name__ == "__main__":
    main()