datasciencedojo committed
Commit e21cf92 · 1 Parent(s): 9e470ae
Update utils/utils.py
utils/utils.py +71 -3
utils/utils.py CHANGED
@@ -2,6 +2,8 @@ from PyPDF2 import PdfReader
 from agents.agents import get_agent_groq
 import json
 import re
+import time
+from agents import prompts
 
 
 def parse_resume(path):
@@ -22,26 +24,92 @@ def parse_resumes(resumes_list):
         resumes_text.append(text)
     return resumes_text
 
+def parse_(resumes_list):
+    resumes_text=[]
+    for resume in resumes_list:
+        text=parse_resume(resume)
+        resumes_text.append(text)
+    return resumes_text
+
+
+from typing_extensions import Annotated, TypedDict, Optional
+
+# Define TypedDict for structured output
+class ResumeAnalysis(TypedDict):
+    candidate_name: Annotated[str, ..., "Name of the candidate with the highest score"]
+    overall_match_score: Annotated[int, ..., "sum of scores for skills_keywords_score, experience_score, education_certifications_score, and preferred_qualifications_score (Whole Number)"]
+    skills_keywords_score: Annotated[int, ..., "Score for Skills and Keywords (0-40)"]
+    skills_keywords_explanation: Annotated[str, ..., "Explanation for Skills and Keywords"]
+    experience_score: Annotated[int, ..., "Score for Experience (0-30)"]
+    experience_explanation: Annotated[str, ..., "Explanation for Experience"]
+    education_certifications_score: Annotated[int, ..., "Score for Education & Certifications (0-20)"]
+    education_certifications_explanation: Annotated[str, ..., "Explanation for Education & Certifications"]
+    preferred_qualifications_score: Annotated[int, ..., "Score for Preferred Qualifications (0-10)"]
+    preferred_qualifications_explanation: Annotated[str, ..., "Explanation for Preferred Qualifications"]
+    score_interpretation: Annotated[str, ..., "donot mention any numbers here, just Interpretation in words of the overall_match_score"]
+
+# Use structured output with the LLM
+
+def generate_analysis_new(resume_text, job_listing_text, job_title_text, must_have, prompt_template):
+    # Send the structured prompt to the agent and expect a structured response
+    agent = get_agent_groq().with_structured_output(ResumeAnalysis)
+    # using structured output LLM
+    response = agent.invoke(
+        prompt_template.format(
+            resume=resume_text,
+            job_listing=job_listing_text,
+            job_title_text=job_title_text,
+            must_have=must_have
+        )
+    )
+    response['overall_match_score']=response['skills_keywords_score']+response['education_certifications_score']+response['experience_score']+response['preferred_qualifications_score']
+    print(response)
+    return response  # response is already structured as per ResumeAnalysis
 
 def generate_analysis(resume_text, job_listing_text,job_title_text, must_have,prompt_template):
     agent = get_agent_groq()
     resp = agent.invoke(prompt_template.format(resume=resume_text, job_listing=job_listing_text,job_title_text=job_title_text,must_have=must_have))
+    #print('response of agent',resp)
     text_res=extract(resp.content)
     #text_res=extract(text_res)
     #chain = prompt | agent
-    print(text_res)
+    #print(text_res)
     #text = resp.content
     return text_res
 
 def generate_sel_analysis(resume_text, job_listing_text,job_title_text, must_have,prompt_template):
+    prompt_templates = prompts.prompt_template_modern
+    generate_individual_analysis(resume_text, job_listing_text,job_title_text, must_have,prompt_templates)
     #chain = prompt | agent
     agent = get_agent_groq()
     response = agent.invoke(prompt_template.format(resume=resume_text, job_listing=job_listing_text,job_title_text=job_title_text,must_have=must_have))
-    print(response.content)
+    #print(response.content)
     text_res=extract_sel(response.content)
-    print(text_res)
+    #print(text_res)
     return text_res
 
+
+# Analyzing each resume individually and handling delays to avoid token limits
+def generate_individual_analysis(resumes, job_listing_text, job_title_text, must_have, prompt_template, delay=10):
+    #agent = get_agent_groq()
+    all_results = []
+
+    for resume_text in resumes:
+        structured_response = generate_analysis_new(resume_text, job_listing_text, job_title_text, must_have, prompt_template)
+        #agent = get_agent_groq().with_structured_output(ResumeAnalysis)
+        # print(response)
+        if structured_response:
+            all_results.append(structured_response)
+
+        # Adding delay to avoid the 6000 tokens per minute limit
+        time.sleep(delay)
+
+    # Sorting results by match score (or any other criteria you prefer)
+    best_match = max(all_results, key=lambda x: x.get("overall_match_score", 0))
+    print('best_match',best_match)
+    print('all_results',all_results)
+    return all_results
+
 def extract(content):
 
     json_pattern = r'```\n(.*?)\n```'
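For reference, below is a minimal driver sketch showing how the structured-output path added in this commit could be exercised end to end. It is an illustration only, not part of the commit: the resume paths, job listing text, job title, and must-have list are placeholder values, and it assumes that get_agent_groq() returns a LangChain-style chat model (so .with_structured_output(ResumeAnalysis) is available) and that prompts.prompt_template_modern exposes the {resume}, {job_listing}, {job_title_text}, and {must_have} placeholders used above.

    # Hypothetical usage sketch (not part of the commit); all inputs below are placeholders.
    from utils.utils import parse_, generate_individual_analysis
    from agents import prompts

    resume_paths = ["resume_a.pdf", "resume_b.pdf"]      # placeholder PDF paths
    job_listing = "We are hiring a Data Scientist ..."   # placeholder job listing text
    job_title = "Data Scientist"                         # placeholder job title
    must_have = "Python, SQL, machine learning"          # placeholder must-have skills

    # Extract plain text from each PDF, then score every resume one at a time.
    # generate_individual_analysis sleeps `delay` seconds between calls to stay
    # under the per-minute token limit and returns one ResumeAnalysis dict per resume.
    resumes_text = parse_(resume_paths)
    results = generate_individual_analysis(
        resumes_text, job_listing, job_title, must_have,
        prompts.prompt_template_modern, delay=10,
    )

    # Rank candidates by the recomputed overall_match_score.
    ranked = sorted(results, key=lambda r: r.get("overall_match_score", 0), reverse=True)
    for r in ranked:
        print(r["candidate_name"], r["overall_match_score"])

The fixed time.sleep(delay) between resumes is a simple guard against the rate limit mentioned in the code comment; a more robust variant would back off and retry when the provider returns a rate-limit error rather than always waiting a fixed interval.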