Sanjay Malladi committed
Commit 2792fd5 · 1 Parent(s): cb84821
Commit message: Your commit message
Files changed:
- .env +3 -0
- app.py +253 -0
- mh_aspects.py +115 -0
- mh_clarification.py +101 -0
- mh_classification.py +118 -0
- mh_evaluation.py +114 -0
- prompts/__pycache__/mh_aspects.cpython-310.pyc +0 -0
- prompts/__pycache__/mh_clarification.cpython-310.pyc +0 -0
- prompts/mh_aspects.py +41 -0
- prompts/mh_clarification.py +33 -0
- prompts/mh_evaluation_prompt.py +75 -0
- requirements.txt +8 -0
.env
ADDED
@@ -0,0 +1,3 @@
MODEL = "gpt-4o-mini"
OPENAI_API_KEY = "sk-gwsGNkYyB3dt2mnJTG1ALWeZQqUGuP48gbFNdVjURvT3BlbkFJfnriz0muCX7jgtf98uHUnM_suaZre22pH0II1drJ8A"
OPENAI_MODEL = "gpt-4o-mini"
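These three values are read at runtime through python-dotenv by app.py and the mh_* modules; a minimal sketch of the lookup pattern used there (the fallback defaults below are the ones that appear in the modules and differ slightly per file):

import os
from dotenv import load_dotenv

load_dotenv()                                           # copies the .env entries into the process environment
api_key = os.getenv("OPENAI_API_KEY")                   # each mh_* module raises ValueError when this is missing
chat_model = os.getenv("MODEL", "gpt-4o-mini")          # consumed by the ChatOpenAI wrappers
ui_model = os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")   # shown in the Streamlit sidebar by app.py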
app.py
ADDED
@@ -0,0 +1,253 @@
import streamlit as st
import os
from dotenv import load_dotenv
import PyPDF2
from docx import Document
import io
from typing import Dict, Any, List
from pydantic import BaseModel, Field
import plotly.graph_objects as go
import json
import re
from docx.shared import Inches
from docx.enum.text import WD_ALIGN_PARAGRAPH
import plotly.io as pio
from mh_aspects import agent as aspects_agent
from mh_classification import agent as clarification_agent
from mh_evaluation import MHEvaluationAgent

# Load environment variables
load_dotenv()

# Get model from environment
OPENAI_MODEL = os.getenv('OPENAI_MODEL', 'gpt-3.5-turbo')

# Initialize evaluation agent
mh_eval_agent = MHEvaluationAgent()

def test_api_connection():
    """Test if the OpenAI API is working"""
    try:
        # Create a test job description
        test_jd = """Test Job Description
        Position: Software Engineer
        Requirements:
        - 3+ years of Python experience
        - Bachelor's degree in Computer Science
        - Experience with web development
        """

        # Try to get a response from the aspects agent
        response = aspects_agent.run(input=f"Analyze this job description and generate key must-have aspects only:\n\n{test_jd}")

        if response:
            st.success("✅ API connection successful!")
            return True
        else:
            st.error("❌ API connection failed: No response received")
            return False
    except Exception as e:
        st.error(f"❌ API connection failed: {str(e)}")
        return False

# Pydantic model for must-have requirements
class MustHaveAnalysis(BaseModel):
    category: str = Field(..., description="Category (1: No must-haves mentioned, 2: Meets Requirements, 3: Does Not Meet)")
    evidence: List[str] = Field(default_factory=list, description="Evidence supporting the categorization")
    confidence: float = Field(default=0.8, description="Confidence score between 0 and 1")

# Set page config
st.set_page_config(
    page_title="JD & Resume Analyzer",
    page_icon="📄",
    layout="wide"
)

# Initialize session state
if 'analysis_result' not in st.session_state:
    st.session_state.analysis_result = None
if 'aspects' not in st.session_state:
    st.session_state.aspects = None
if 'clarifications' not in st.session_state:
    st.session_state.clarifications = None

def create_gauge_chart(value, title):
    fig = go.Figure(go.Indicator(
        mode="gauge+number",
        value=value,
        domain={'x': [0, 1], 'y': [0, 1]},
        title={'text': title},
        gauge={
            'axis': {'range': [0, 100]},
            'bar': {'color': "rgb(50, 168, 82)"},
            'steps': [
                {'range': [0, 33], 'color': "lightgray"},
                {'range': [33, 66], 'color': "gray"},
                {'range': [66, 100], 'color': "darkgray"}
            ],
            'threshold': {
                'line': {'color': "red", 'width': 4},
                'thickness': 0.75,
                'value': 80
            }
        }
    ))

    fig.update_layout(
        height=250,
        margin=dict(l=10, r=10, t=50, b=10),
        paper_bgcolor="rgba(0,0,0,0)",
        font={'color': "#31333F"}
    )
    return fig

def extract_text_from_pdf(file):
    try:
        pdf_reader = PyPDF2.PdfReader(file)
        text = ""
        for page in pdf_reader.pages:
            text += page.extract_text() + "\n"
        return text.strip()
    except Exception as e:
        st.error(f"Error reading PDF: {str(e)}")
        return None

def extract_text_from_docx(file):
    try:
        doc = Document(io.BytesIO(file.read()))
        text = ""
        for paragraph in doc.paragraphs:
            text += paragraph.text + "\n"
        return text.strip()
    except Exception as e:
        st.error(f"Error reading DOCX: {str(e)}")
        return None

def read_file_content(file):
    if file is None:
        return None

    # Rewind the upload buffer first: the preview text_area in main() may already have consumed it.
    file.seek(0)

    file_extension = file.name.split('.')[-1].lower()

    try:
        if file_extension == 'pdf':
            file_copy = io.BytesIO(file.read())
            file.seek(0)
            return extract_text_from_pdf(file_copy)
        elif file_extension == 'docx':
            return extract_text_from_docx(file)
        elif file_extension == 'txt':
            return file.read().decode('utf-8').strip()
        else:
            raise ValueError(f"Unsupported file type: {file_extension}")
    except Exception as e:
        st.error(f"Error reading file {file.name}: {str(e)}")
        return None

def analyze_must_haves(jd_text: str, resume_text: str) -> Dict:
    """Analyze must-have requirements using the three-step process"""
    try:
        # Step 1: Generate must-have aspects from JD
        aspects = aspects_agent.run(input=f"Analyze this job description and generate key must-have aspects only:\n\n{jd_text}")
        st.session_state.aspects = aspects

        # Step 2: Generate clarifications from resume
        input_text = f"""Checkpoints:
{aspects}

Resume:
{resume_text}"""
        clarifications = clarification_agent.run(input=input_text)
        st.session_state.clarifications = clarifications

        # Step 3: Final evaluation
        evaluation = mh_eval_agent.forward(
            job_description=jd_text,
            profile=resume_text,
            checkpoints=aspects,
            answer_script=clarifications
        )

        return {
            'aspects': aspects,
            'clarifications': clarifications,
            'evaluation': evaluation
        }
    except Exception as e:
        st.error(f"Error in analysis pipeline: {str(e)}")
        return None

def display_analysis_result(result: Dict):
    if not result:
        st.error("Analysis failed")
        return

    st.title("Must-Have Requirements Analysis")

    # Display aspects
    with st.expander("🎯 Must-Have Requirements", expanded=True):
        st.write(result['aspects'])

    # Display clarifications
    with st.expander("🔍 Clarifications", expanded=True):
        st.write(result['clarifications'])

    # Display evaluation
    st.header("📊 Final Evaluation")
    evaluation = result['evaluation']

    # Display the evaluation in the requested format
    st.write(evaluation)

def main():
    st.title("📄 JD & Resume Must-Have Requirements Analyzer")

    # Test API connection when the page loads
    if not test_api_connection():
        st.warning("⚠️ Please check your API key and model configuration in the .env file")
        return

    st.write("Upload a job description and resume to analyze if the candidate meets the must-have requirements.")

    # Display the model being used
    st.sidebar.info(f"Using model: {OPENAI_MODEL}")

    # File uploaders
    col1, col2 = st.columns(2)

    with col1:
        jd_file = st.file_uploader("Upload Job Description (PDF, DOCX, or TXT)", type=['pdf', 'docx', 'txt'])
        if jd_file:
            st.text_area("Job Description Content", read_file_content(jd_file), height=300)

    with col2:
        resume_file = st.file_uploader("Upload Resume (PDF, DOCX, or TXT)", type=['pdf', 'docx', 'txt'])
        if resume_file:
            st.text_area("Resume Content", read_file_content(resume_file), height=300)

    # Process button
    if st.button("Analyze Must-Have Requirements"):
        if jd_file and resume_file:
            with st.spinner("Analyzing documents..."):
                try:
                    jd_text = read_file_content(jd_file)
                    resume_text = read_file_content(resume_file)

                    if jd_text and resume_text:
                        analysis = analyze_must_haves(jd_text, resume_text)
                        st.session_state.analysis_result = analysis
                        display_analysis_result(analysis)
                    else:
                        st.error("Failed to extract text from one or both files.")

                except Exception as e:
                    st.error(f"An error occurred: {str(e)}")
        else:
            st.warning("Please upload both a job description and resume.")

    # Display previous results if available
    if st.session_state.analysis_result and not (jd_file and resume_file):
        display_analysis_result(st.session_state.analysis_result)

if __name__ == "__main__":
    main()
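For debugging outside the Streamlit UI, the same three-step flow that analyze_must_haves() runs can be driven from a plain script; a minimal sketch, with invented JD and resume strings and the st.* calls left out:

# Hypothetical standalone driver mirroring analyze_must_haves() in app.py.
from mh_aspects import agent as aspects_agent
from mh_classification import agent as clarification_agent
from mh_evaluation import MHEvaluationAgent

jd_text = "Backend Engineer. Must have 3+ years of Python and production API experience."
resume_text = "5 years building Flask/FastAPI services in Python; B.Sc. in Computer Science."

aspects = aspects_agent.run(input=f"Analyze this job description and generate key must-have aspects only:\n\n{jd_text}")
clarifications = clarification_agent.run(input=f"Checkpoints:\n{aspects}\n\nResume:\n{resume_text}")
evaluation = MHEvaluationAgent().forward(
    job_description=jd_text,
    profile=resume_text,
    checkpoints=aspects,
    answer_script=clarifications,
)
print(evaluation)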
mh_aspects.py
ADDED
@@ -0,0 +1,115 @@
import os
import time
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.agents import initialize_agent, AgentType
from langchain.tools import Tool
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage

# Load environment variables
load_dotenv()

# Set environment variables for model API key and model type
os.environ.setdefault("DSP_CACHEBOOL", "false")

# Get API key
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set")

# Initialize the chat model
model = ChatOpenAI(
    model=os.getenv("MODEL", "gpt-4o-mini"),
    openai_api_key=api_key,
    temperature=0.0,
    max_tokens=4000
)

# Define your tool to process the job description
class JobDescriptionProcessor(Tool):
    def __init__(self, name: str, description: str, func):
        super().__init__(name=name, func=func, description=description)

    def _run(self, job_description: str):
        return self.func(job_description)

# Define a function to generate aspects based on job description
def generate_aspects(job_description: str) -> str:
    # Create a prompt for the model (the JD itself is appended so the model actually receives it)
    prompt = f"""
You are an expert recruiter specialized in analyzing resumes against job descriptions (JDs). Your task is to formulate checkpoints that focus on verifying criteria that are explicitly mentioned as must-have in the JD. These checkpoints will help generate insightful responses in the next step, ensuring the resume is analyzed against the critical, non-negotiable requirements, if any, outlined in the JD.

**Input**: The input for this task will be the job description (JD).
**Output**: Formulate 2 to 3 evaluation checkpoints/criteria focused solely on the must-have requirements. These checkpoints/criteria will serve as evaluation criteria for the next stage, where the candidate's resume will be checked for evidence and reasoning.

### Steps:
1) Understand the JD and determine the number of checkpoints (between 2-3) required depending on the specifications from the JD and the context of the role. For freshers/career beginners, the number of checkpoints could be less in number.
2) With a holistic and pragmatic approach, formulate the checkpoints that cover the verifiable aspects usually available from resumes. Note that the cultural aspects or thinking process or future plans of the candidate should not be part of this exercise.

**Guidelines**:
1. Identify parameters explicitly marked as must-have in the JD.
    a. Consider the context and include aspects labeled as “required,” “mandatory,” “essential,” “prerequisite,” or similar if appropriate to be considered as must-have.
    b. Focus only on very critical criteria that, if missing, should lead to disqualification of the candidate.
2. Clearly differentiate between must-haves and good-to-haves/preferences.
    a. Exclude any parameters described as “preferred,” “nice-to-have,” or optional.
3. If specific education, certification, or experience is not explicitly mentioned as a must-have, do not include it in this section.

**Output Format:**
Checkpoint 1: [Description of checkpoint]
Checkpoint 2: [Description of checkpoint]

Job Description:
{job_description}"""

    # Get response from the model
    response = model.invoke([HumanMessage(content=prompt)])
    return response.content

# Define the agent tools
tools = [
    JobDescriptionProcessor(
        name="JobDescriptionProcessor",
        description="Process job descriptions and generate aspects",
        func=generate_aspects,
    ),
]

# Initialize the LangChain agent (the agent type is passed via the `agent` keyword)
agent = initialize_agent(
    tools, model, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)

# Main logic to execute the agent with job description input
if __name__ == "__main__":
    good_jd = """
    Job Description
    Workday FIN Consultant
    Sentient Solutions is a rapidly growing accounting outsourcing firm with clients across the United States. Sentient's highly experienced team of experts based in the US, India, and Mexico offer fully customized solutions to suit the specific business needs of CPA Firms. With AI, process automation, and a suite of scalable services, we promise success to CPA firms.
    Location: Jubilee Hills, Hyderabad
    Experience: 3 to 7 years
    Website: https://sentientsolutions.io/
    Job Summary:
    1. Ensure a smooth transition and onboarding of new partner firms and tuck-ins into our Workday Financials landscape.
    Ensure newly acquired firms and tuck-ins are live on Workday Financials on Day 1 of deal closure.
    Demonstrate strong service orientation, change management skills, and overall dedication to stakeholder success at our Partner Firms.
    Seek to drive process and automation-based improvements to minimize the time to delivery for onboarding and the resources required to onboard new acquisitions.
    Participate in overall partner firm integration and lead the conversations around financials integration. Maintain an updated playbook for all requirements to onboard an acquisition effectively.
    2. Demonstrate strong knowledge in multiple Workday Financials areas, including core financials, suppliers, accounts payable, expenses, payroll, banking, assets, and report writing.
    Perform as a subject matter expert in the aforementioned Workday areas.
    Act as the first point of service for Workday Financials related support questions.
    Escalate as necessary to our external partners; manage the use of the external partnership to support Workday Financials.
    3. Monitor and maintain all Workday Financials integrations and workflows.
    Proactively resolve errors in integrations and workflows.
    Provide suggestions to the change committee for improving integrations and workflows to maximize efficiency and reduce opportunities for error.
    4. Achieve an NPS score > 70 by the end of the first year for Partner Firm Experience with Workday onboarding.
    Maximize the Partner Firm experience by embodying our values and committing to excellent customer service.
    5. VALUES & CULTURE: Uphold and embody Ascend values.
    Own it. We seize growth opportunities with the passion, speed and accountability of an entrepreneur.
    Serve whole-heartedly. We love our people and serve them with positivity and kindness.
    Be excellent. We set a high bar because our mission matters.
    Win together. We stay in sync, achieve as a team and celebrate each other.
    Bring the sunshine. We have fun and bring enthusiasm to make the journey joyful.
    """
|
112 |
+
|
113 |
+
# Run the LangChain agent to get the results
|
114 |
+
ans = agent.run(job_description=good_jd)
|
115 |
+
print("\n\ncriteria : \n", ans)
|
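For a quicker check that skips the ReAct agent loop, the checkpoint generator can be called directly; a minimal sketch, assuming OPENAI_API_KEY is set and using an invented one-line JD:

from mh_aspects import generate_aspects

sample_jd = "Senior Python Developer. Must have 5+ years of Python and a Bachelor's degree in CS."
print(generate_aspects(sample_jd))  # expected to print "Checkpoint 1: ...", "Checkpoint 2: ..."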
mh_clarification.py
ADDED
@@ -0,0 +1,101 @@
import os
import time
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.agents import initialize_agent, AgentType
from langchain.tools import Tool
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage

# Load environment variables
load_dotenv()

# Set environment variables for model API key and model type
os.environ.setdefault("DSP_CACHEBOOL", "false")

# Get API key
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set")

# Initialize the chat model
model = ChatOpenAI(
    model=os.getenv("MODEL", "gpt-3.5-turbo"),
    openai_api_key=api_key,
    temperature=0.0,
    max_tokens=4000
)

# Define the clarification prompt
clarification_prompt = PromptTemplate(
    input_variables=["checkpoints", "resume"],
    template="""
You are an expert recruiter specializing in reading resumes against job descriptions. Your task is to read the checkpoints provided and extract objective and factual information (if available) from the resume to clarify these checkpoints.

**Guidelines:**
1. Analyze both explicit and implicit meanings from the resume.
2. For must-have certifications, consider only those explicitly mentioned. Do not assume.
3. For industry relevance, assess the organizations listed and determine their industries.
4. For education and certifications, verify if they match stated requirements.
5. Provide objective reasoning with factual pointers from the resume.
6. Do not hallucinate or include information not grounded in the resume.
7. If the resume lacks enough information, mention this explicitly.

**Checkpoints:**
{checkpoints}

**Resume:**
{resume}

**Output Format:**
Checkpoint 1: [Factual reasoning from resume based on checkpoint]
Checkpoint 2: [Factual reasoning from resume based on checkpoint]
"""
)

# Define a tool to process checkpoints and resume
class ClarificationProcessor(Tool):
    def __init__(self, name: str, description: str, func):
        super().__init__(name=name, func=func, description=description)

    def _run(self, input: str):
        # Extract checkpoints and resume from the input string
        try:
            # Split by "Resume:" and ensure we have both parts
            parts = input.split("Resume:")
            if len(parts) != 2:
                return "Error: Input must contain both checkpoints and resume sections"

            # Extract checkpoints and resume, removing any extra whitespace
            checkpoints = parts[0].replace("Checkpoints:", "").strip()
            resume = parts[1].strip()

            if not checkpoints or not resume:
                return "Error: Both checkpoints and resume sections must not be empty"

            return self.func(checkpoints, resume)
        except Exception as e:
            return f"Error processing input: {str(e)}"

# Define a function to generate clarifications based on checkpoints and resume
def generate_clarifications(checkpoints: str, resume: str) -> str:
    # Create the prompt
    prompt_text = clarification_prompt.format(checkpoints=checkpoints, resume=resume)

    # Get response from the model
    response = model.invoke([HumanMessage(content=prompt_text)])
    return response.content

# Define the agent tools
tools = [
    ClarificationProcessor(
        name="ClarificationProcessor",
        description="Generate clarifications based on the provided checkpoints and resume.",
        func=generate_clarifications,
    ),
]

# Initialize the LangChain agent (the agent type is passed via the `agent` keyword)
agent = initialize_agent(
    tools, model, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
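The ClarificationProcessor tool expects one string containing both sections split on the literal "Resume:" marker, which is the same format analyze_must_haves() in app.py builds; a minimal sketch of calling the underlying function directly, with invented checkpoint and resume text:

from mh_clarification import generate_clarifications

checkpoints = "Checkpoint 1: Must have 3+ years of Python experience.\nCheckpoint 2: Must hold a Bachelor's degree in CS."
resume = "Jane Doe - 4 years as a Python backend developer. B.Tech in Computer Science."
print(generate_clarifications(checkpoints, resume))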
mh_classification.py
ADDED
@@ -0,0 +1,118 @@
import os
import time
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.agents import initialize_agent, AgentType
from langchain.tools import Tool
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage
from prompts.mh_clarification import clarification_prompt

# Load environment variables
load_dotenv()

# Set environment variables for model API key and model type
os.environ.setdefault("DSP_CACHEBOOL", "false")

# Get API key
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set")

# Initialize the chat model
model = ChatOpenAI(
    model=os.getenv("MODEL", "gpt-3.5-turbo"),
    openai_api_key=api_key,
    temperature=0.0,
    max_tokens=4000
)

# Define a tool to process job descriptions
class JobDescriptionProcessor(Tool):
    def __init__(self, name: str, description: str, func):
        super().__init__(name=name, func=func, description=description)

    def _run(self, input: str):
        try:
            # Extract key aspects from the job description
            aspects = self.func(input)
            return aspects
        except Exception as e:
            return f"Error processing job description: {str(e)}"

# Define a function to extract key aspects from job description
def extract_key_aspects(jd: str) -> str:
    try:
        # Create a prompt for extracting key aspects
        prompt = f"""Extract the key aspects from this job description. Focus on:
1. Position title
2. Required qualifications
3. Experience requirements
4. Key skills
5. Must-have requirements

Job Description:
{jd}

Format the output as a clear list of key aspects."""

        # Get response from the model
        response = model.invoke([HumanMessage(content=prompt)])
        return response.content
    except Exception as e:
        return f"Error extracting key aspects: {str(e)}"

# Define a tool to process checkpoints and resume
class ClarificationProcessor(Tool):
    def __init__(self, name: str, description: str, func):
        super().__init__(name=name, func=func, description=description)

    def _run(self, input: str):
        # Extract checkpoints and resume from the input string
        try:
            # Split by "Resume:" and ensure we have both parts
            parts = input.split("Resume:")
            if len(parts) != 2:
                return "Error: Input must contain both checkpoints and resume sections"

            # Extract checkpoints and resume, removing any extra whitespace
            checkpoints = parts[0].replace("Checkpoints:", "").strip()
            resume = parts[1].strip()

            if not checkpoints or not resume:
                return "Error: Both checkpoints and resume sections must not be empty"

            return self.func(checkpoints, resume)
        except Exception as e:
            return f"Error processing input: {str(e)}"

# Define a function to generate clarifications based on checkpoints and resume
def generate_clarifications(checkpoints: str, resume: str) -> str:
    try:
        # Create the prompt
        prompt_text = clarification_prompt.format(checkpoints=checkpoints, resume=resume)

        # Get response from the model
        response = model.invoke([HumanMessage(content=prompt_text)])
        return response.content
    except Exception as e:
        return f"Error generating clarifications: {str(e)}"

# Define the agent tools
tools = [
    JobDescriptionProcessor(
        name="JobDescriptionProcessor",
        description="Extract key aspects from a job description.",
        func=extract_key_aspects,
    ),
    ClarificationProcessor(
        name="ClarificationProcessor",
        description="Generate clarifications based on the provided checkpoints and resume.",
        func=generate_clarifications,
    ),
]

# Initialize the LangChain agent (the agent type is passed via the `agent` keyword)
agent = initialize_agent(
    tools, model, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
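app.py imports this module's agent as clarification_agent and feeds it the combined "Checkpoints:"/"Resume:" string, which the ReAct agent routes to ClarificationProcessor; a minimal sketch with an invented payload:

from mh_classification import agent as clarification_agent

payload = (
    "Checkpoints:\n"
    "Checkpoint 1: Must have hands-on Workday Financials experience.\n\n"
    "Resume:\n"
    "Worked 4 years as a Workday Financials consultant handling AP, expenses and banking."
)
print(clarification_agent.run(input=payload))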
mh_evaluation.py
ADDED
@@ -0,0 +1,114 @@
import os
import time
from typing import List, Optional
from langchain_openai import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage

# Load environment variables
load_dotenv()

class BaseAgent:
    def __init__(self, tools: Optional[List] = None):
        # Get API key
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OPENAI_API_KEY environment variable is not set")

        self.model = ChatOpenAI(
            model=os.getenv("MODEL", "gpt-3.5-turbo"),
            openai_api_key=api_key,
            max_tokens=4000,
            temperature=0.0,
            top_p=1
        )

class MHEvaluationAgent(BaseAgent):
    def __init__(self, tools: Optional[List] = None):
        super().__init__(tools)

        # Define the evaluation prompt
        self.evaluation_prompt = PromptTemplate(
            input_variables=["job_description", "candidates_profile", "checkpoints", "answer_script"],
            template="""
You are an expert recruiter specializing in evaluating resumes against job descriptions.
Your task is to evaluate and assign a categorisation for the candidate's resume based on the "checkpoints" and "answer script" provided, to understand how well it aligns with the JD. Also provide a brief reasoning. You are required to take a pragmatic and holistic approach, considering the context of the resume, and understand the implied aspects as well. For example, if the JD specifies a requirement of graduation and the resume mentions post-graduation, it is implied that the person holds a graduate degree and should be considered as such.

Think step by step and follow the instructions provided below:

*Input*: The input for this task will be the "Checkpoints", "Answer Script" and "Job Description".
*Output*: The output should be the category of the resume and a summary of evidence and reasoning explaining the observation and the reasons for the categorisation.

### Steps:

Step 1: Understand the Job Description along with the checkpoints and answer script provided.

Step 2: Analyse whether the answers from the checkpoints and answer script satisfy the must-haves while considering the following aspects:

a) If there are any checkpoints related to years of experience, do not strictly focus on just the number of years in the literal sense. The number of years should be given due importance but, more importantly, considered holistically, taking into account the context of the role, the responsibilities handled by the candidate, etc. Minor deviations in the number of years of experience should not lead to disqualification.

b) While considering the other checkpoints and answers, take a holistic and pragmatic approach and give due importance to both the explicit and implied experiences and skills based on the role/responsibilities, the context of the roles, and the past experiences of the individual.

c) If there are any checkpoints related to education, consider the context while evaluating. For example, if the JD specifies a requirement of graduation and the resume mentions post-graduation, it is implied that the person qualifies for that checkpoint.

Step 3: Based on the understanding from Step 1 and Step 2, categorise the resume following the criteria specified below:

a) Category I: The JD does not explicitly or implicitly specify any must-haves or essentials for the resume to be considered for the role.
b) Category II: Satisfies all must-haves explicitly or implicitly. If there is slight uncertainty, give the benefit of the doubt to the candidate and place him/her in Category II.
c) Category III: Lacks one or more must-haves mentioned in the JD.

Step 4: *Provide factual evidence:*
Provide reasoning for the rating along with observations explaining why the candidate has been assigned to a particular category. Include specific examples from the "checkpoints" and "answer script" to support your categorisation.

### Output Format:
**category**: category I/II/III based on the evidence.
**evidence**: Provide a concise justification for the categorisation in only 40-50 words explaining why the candidate's relevant skills and expertise do or do not align with the skills required for the role outlined in the job description.

Job Description:
{job_description}

Candidate's Resume:
{candidates_profile}

Checkpoints:
{checkpoints}

Answer Script:
{answer_script}
"""
        )

    def forward(
        self,
        job_description: Optional[str] = None,
        profile: Optional[str] = None,
        checkpoints: Optional[str] = None,
        answer_script: Optional[str] = None,
        retries: int = 5
    ) -> Optional[str]:
        if job_description:
            for _ in range(retries):
                try:
                    # Create the prompt
                    prompt_text = self.evaluation_prompt.format(
                        job_description=job_description,
                        candidates_profile=profile or "",
                        checkpoints=checkpoints or "",
                        answer_script=answer_script or ""
                    )

                    # Get response from the model
                    response = self.model.invoke([HumanMessage(content=prompt_text)])
                    return response.content
                except Exception as e:
                    print(e)
                    time.sleep(3)
        return None

if __name__ == "__main__":
    agent = MHEvaluationAgent()
    ans = agent.forward(job_description="good_jd", profile="profile")
    print("\n\nResult:", ans)
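A minimal end-to-end sketch of the evaluator with more realistic inputs than the placeholder strings in the __main__ block above (all four strings below are invented for illustration):

from mh_evaluation import MHEvaluationAgent

evaluator = MHEvaluationAgent()
result = evaluator.forward(
    job_description="Workday FIN Consultant, 3-7 years of Workday Financials experience required.",
    profile="Consultant with 5 years configuring Workday core financials, AP and expenses.",
    checkpoints="Checkpoint 1: Must have 3+ years of Workday Financials experience.",
    answer_script="Checkpoint 1: Resume shows 5 years of Workday Financials configuration work.",
)
print(result)  # expected to contain the **category** and **evidence** sections defined in the prompt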
prompts/__pycache__/mh_aspects.cpython-310.pyc
ADDED
Binary file (3.34 kB).
prompts/__pycache__/mh_clarification.cpython-310.pyc
ADDED
Binary file (1.57 kB).
prompts/mh_aspects.py
ADDED
@@ -0,0 +1,41 @@
from typing import List, Dict
from pydantic import BaseModel
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI

class AspectsStructure(BaseModel):
    checkpoints: List[str]

# The {job_description} placeholder is included so the declared input variable is actually used.
prompt_template = PromptTemplate(
    input_variables=["job_description"],
    template="""
You are an expert recruiter specialized in analyzing resumes against job descriptions (JDs). Your task is to formulate checkpoints that focus on verifying criteria that are explicitly mentioned as must-have in the JD. These checkpoints will help generate insightful responses in the next step, ensuring the resume is analyzed against the critical, non-negotiable requirements, if any, outlined in the JD.

**Input**: The input for this task will be the job description (JD).
**Output**: Formulate 2 to 3 evaluation checkpoints/criteria focused solely on the must-have requirements. These checkpoints/criteria will serve as evaluation criteria for the next stage, where the candidate's resume will be checked for evidence and reasoning.

### Steps:
1) Understand the JD and determine the number of checkpoints (between 2-3) required depending on the specifications from the JD and the context of the role. For freshers/career beginners, the number of checkpoints could be less in number.
2) With a holistic and pragmatic approach, formulate the checkpoints that cover the verifiable aspects usually available from resumes. Note that the cultural aspects or thinking process or future plans of the candidate should not be part of this exercise.

**Guidelines**:
1. Identify parameters explicitly marked as must-have in the JD.
    a. Consider the context and include aspects labeled as “required,” “mandatory,” “essential,” “prerequisite,” or similar if appropriate to be considered as must-have.
    b. Focus only on very critical criteria that, if missing, should lead to disqualification of the candidate.
2. Clearly differentiate between must-haves and good-to-haves/preferences.
    a. Exclude any parameters described as “preferred,” “nice-to-have,” or optional.
3. If specific education, certification, or experience is not explicitly mentioned as a must-have, do not include it in this section.

**Output Format:**
Checkpoint 1: [Description of checkpoint]
Checkpoint 2: [Description of checkpoint]

Job Description:
{job_description}
"""
)

class MhAspectsSignature:
    def __init__(self, llm: OpenAI):
        self.chain = LLMChain(llm=llm, prompt=prompt_template)

    def generate_checkpoints(self, job_description: str) -> str:
        return self.chain.run(job_description=job_description)
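A minimal sketch of wiring this template through MhAspectsSignature. Note this helper is not the path app.py takes (the app calls mh_aspects.agent), and the completion-model settings below are assumptions:

from langchain.llms import OpenAI
from prompts.mh_aspects import MhAspectsSignature

llm = OpenAI(temperature=0)  # assumes OPENAI_API_KEY is set in the environment
aspects = MhAspectsSignature(llm)
print(aspects.generate_checkpoints("Data Engineer role. Must have 4+ years of SQL and Airflow."))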
prompts/mh_clarification.py
ADDED
@@ -0,0 +1,33 @@
from typing import List, Dict
from langchain.prompts import PromptTemplate

# Define the prompt template
clarification_prompt = PromptTemplate(
    input_variables=["checkpoints", "resume"],
    template="""
You are an expert recruiter specializing in reading resumes against job descriptions. Your task is to read the checkpoints provided and extract objective and factual information (if available) from the resume to clarify these checkpoints.

**Guidelines:**
1. Analyze both explicit and implicit meanings from the resume.
2. For must-have certifications, consider only those explicitly mentioned. Do not assume.
3. For industry relevance, assess the organizations listed and determine their industries.
4. For education and certifications, verify if they match stated requirements.
5. Provide objective reasoning with factual pointers from the resume.
6. Do not hallucinate or include information not grounded in the resume.
7. If the resume lacks enough information, mention this explicitly.

**Checkpoints:**
{checkpoints}

**Resume:**
{resume}

**Output Format:**
[✅ Must-Haves
Category: 2 ✅ Meets Requirements
Evidence:
• [List specific evidence points for each must-have requirement]
"musthave": "[List the must-have requirements exactly as provided]"
"musthave": "[Provide detailed analysis of each must-have requirement]"]
"""
)
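This is the template that mh_classification.py imports for its ClarificationProcessor tool; a minimal formatting check, with invented checkpoint and resume strings:

from prompts.mh_clarification import clarification_prompt

text = clarification_prompt.format(
    checkpoints="Checkpoint 1: Must have a PMP certification.",
    resume="Project manager, 6 years of delivery experience, PMP certified (2021).",
)
print(text)  # the filled-in prompt that gets sent to the chat model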
prompts/mh_evaluation_prompt.py
ADDED
@@ -0,0 +1,75 @@
from typing import List
from pydantic import BaseModel
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI

# Define the structured output model
class MHEvaluationResult(BaseModel):
    category: str  # Category I, II, or III
    evidence: str  # Justification for the categorization

# Define the prompt template
resume_evaluation_prompt = PromptTemplate(
    input_variables=["job_description", "candidates_profile", "checkpoints", "answer_script"],
    template="""
You are an expert recruiter specializing in evaluating resumes against job descriptions.
Your task is to evaluate and assign a categorization for the candidate's resume based on the "checkpoints" and "answer_script" provided to understand how well it aligns with the JD. Also, provide a brief reasoning. You are required to take a pragmatic and holistic approach considering the context of the resume and implied aspects.

### Steps:
Step 1: Understand the Job Description along with the checkpoints and answer script.
Step 2: Analyze if the answers from the checkpoints and answer script satisfy the must-haves while considering the following aspects:
- Years of experience: Consider responsibilities and role context rather than strictly adhering to a numeric limit.
- Education: If a post-graduate degree is mentioned, assume the person has an undergraduate degree.
- Implicit and explicit matches: Identify if relevant skills and experience satisfy the role.

Step 3: Based on the analysis, categorize the resume into:
- **Category I:** JD does not explicitly or implicitly specify any must-haves.
- **Category II:** Satisfies all must-haves explicitly or implicitly.
- **Category III:** Lacks one or more must-haves.

Step 4: Provide evidence supporting the categorization.

**Job Description:**
{job_description}

**Candidate's Resume:**
{candidates_profile}

**Checkpoints:**
{checkpoints}

**Answer Script:**
{answer_script}

### Output Format:
- **category**: (I/II/III)
- **evidence**: A concise 70-100 word justification.

Output:
- Category: [Assign category]
- Evidence: [Provide justification]
"""
)

# Initialize the LLM (gpt-4o-mini)
llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)

# Create an LLMChain
resume_evaluation_chain = LLMChain(llm=llm, prompt=resume_evaluation_prompt)

# Example input
job_description = "Looking for a Data Scientist with 3+ years of experience in Python and ML. A Master's degree is preferred."
candidates_profile = "John Doe has a Master's in Data Science and 4 years of Python experience in ML projects."
checkpoints = "1. Must have Python experience. 2. Must have a Master's degree. 3. Must have 3+ years of ML experience."
answer_script = "John Doe explicitly mentions 4 years of Python and ML experience, along with a Master's degree."

# Run the evaluation
response = resume_evaluation_chain.run(
    job_description=job_description,
    candidates_profile=candidates_profile,
    checkpoints=checkpoints,
    answer_script=answer_script
)

print(response)
requirements.txt
ADDED
@@ -0,0 +1,8 @@
streamlit
langchain
langchain-openai
python-dotenv
openai
PyPDF2
python-docx
langchain[openai]
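app.py also imports plotly (for the gauge chart) and pydantic, which are not pinned here; pydantic normally arrives as a langchain dependency, but plotly would need an extra entry for the Space to build. An assumed additional line, not part of this commit:

plotly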