HARISH20205 committed

Commit 9ac3eaa · 1 Parent(s): a432ed7

skills missing elements

Process/ats_parser.py CHANGED
@@ -1,6 +1,6 @@
1
  import re
2
  import logging
3
- from .response import get_response
4
  from pydantic import BaseModel, TypeAdapter
5
  import json
6
  import traceback
@@ -8,6 +8,7 @@ import traceback
8
  # Set up logging
9
  logger = logging.getLogger(__name__)
10
 
 
11
  class Section:
12
  name: str
13
  email: str
@@ -18,6 +19,7 @@ class Section:
18
  certifications: str
19
  areas_of_interest: str
20
 
 
21
  def deep_get(dictionary, keys, default=None):
22
  logger.debug(f"Accessing deep keys {keys} in dictionary")
23
  try:
@@ -32,6 +34,7 @@ def deep_get(dictionary, keys, default=None):
32
  logger.error(f"Error in deep_get function: {e}")
33
  return default
34
 
 
35
  def extract_resume_details(resume: str):
36
  logger.info("Starting resume details extraction")
37
  """
@@ -82,7 +85,7 @@ def extract_resume_details(resume: str):
82
  logger.info("Sending resume to get_response function")
83
  combined_output = get_response(prompt=resume, task=system_ins)
84
  logger.debug("Raw response received from get_response")
85
-
86
  logger.info("Attempting to parse response to JSON")
87
  result = json.loads(combined_output)
88
  logger.debug("Successfully parsed response to JSON")
 
1
  import re
2
  import logging
3
+ from .response import get_response
4
  from pydantic import BaseModel, TypeAdapter
5
  import json
6
  import traceback
 
8
  # Set up logging
9
  logger = logging.getLogger(__name__)
10
 
11
+
12
  class Section:
13
  name: str
14
  email: str
 
19
  certifications: str
20
  areas_of_interest: str
21
 
22
+
23
  def deep_get(dictionary, keys, default=None):
24
  logger.debug(f"Accessing deep keys {keys} in dictionary")
25
  try:
 
34
  logger.error(f"Error in deep_get function: {e}")
35
  return default
36
 
37
+
38
  def extract_resume_details(resume: str):
39
  logger.info("Starting resume details extraction")
40
  """
 
85
  logger.info("Sending resume to get_response function")
86
  combined_output = get_response(prompt=resume, task=system_ins)
87
  logger.debug("Raw response received from get_response")
88
+
89
  logger.info("Attempting to parse response to JSON")
90
  result = json.loads(combined_output)
91
  logger.debug("Successfully parsed response to JSON")
Process/extract.py CHANGED
@@ -4,18 +4,19 @@ from PIL import Image
4
  import io
5
  import requests
6
 
 
7
  def extract_text_from_pdf(file_path_or_url):
8
  text = ""
9
-
10
  # Check if the file_path_or_url is a URL
11
- if file_path_or_url.startswith(('http://', 'https://')):
12
  # Download the PDF file from URL
13
  response = requests.get(file_path_or_url)
14
  if response.status_code != 200:
15
  raise Exception(f"Failed to download the file: {response.status_code}")
16
-
17
  # Open the PDF from the downloaded bytes
18
- doc = fitz.open(stream=io.BytesIO(response.content), filetype="pdf")
19
  else:
20
  # Open the PDF from a local file path
21
  doc = fitz.open(file_path_or_url)
@@ -24,7 +25,7 @@ def extract_text_from_pdf(file_path_or_url):
24
  page = doc.load_page(page_num)
25
  # Try to extract text
26
  page_text = page.get_text()
27
-
28
  if page_text.strip(): # If text is found
29
  text += page_text
30
  else: # If no text, use OCR
@@ -35,6 +36,7 @@ def extract_text_from_pdf(file_path_or_url):
35
 
36
  return text
37
 
 
38
  # Example usage with Firebase URL
39
  # firebase_url = "https://firebasestorage.googleapis.com/v0/b/resumeats-50ccf.firebasestorage.app/o/uploads%2Fsanthoshrajan776%40gmail.com%2FSanthoshNatarajan_InternshalaResume%20(1).pdf?alt=media&token=f11f9601-6550-4e64-bba6-a2b699a148af"
40
  # text = extract_text_from_pdf(firebase_url)
 
4
  import io
5
  import requests
6
 
7
+
8
  def extract_text_from_pdf(file_path_or_url):
9
  text = ""
10
+
11
  # Check if the file_path_or_url is a URL
12
+ if file_path_or_url.startswith(("http://", "https://")):
13
  # Download the PDF file from URL
14
  response = requests.get(file_path_or_url)
15
  if response.status_code != 200:
16
  raise Exception(f"Failed to download the file: {response.status_code}")
17
+
18
  # Open the PDF from the downloaded bytes
19
+ doc = fitz.open(stream=io.BytesIO(response.content), filetype="pdf")
20
  else:
21
  # Open the PDF from a local file path
22
  doc = fitz.open(file_path_or_url)
 
25
  page = doc.load_page(page_num)
26
  # Try to extract text
27
  page_text = page.get_text()
28
+
29
  if page_text.strip(): # If text is found
30
  text += page_text
31
  else: # If no text, use OCR
 
36
 
37
  return text
38
 
39
+
40
  # Example usage with Firebase URL
41
  # firebase_url = "https://firebasestorage.googleapis.com/v0/b/resumeats-50ccf.firebasestorage.app/o/uploads%2Fsanthoshrajan776%40gmail.com%2FSanthoshNatarajan_InternshalaResume%20(1).pdf?alt=media&token=f11f9601-6550-4e64-bba6-a2b699a148af"
42
  # text = extract_text_from_pdf(firebase_url)
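A short usage sketch for the extractor (illustrative only, not part of this commit); the local path and URL below are placeholders, and PyMuPDF (fitz) plus requests must be installed.

from Process.extract import extract_text_from_pdf

text = extract_text_from_pdf("resume.pdf")  # local file path
# text = extract_text_from_pdf("https://example.com/resume.pdf")  # or a remote URL
print(text[:200])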
Process/models.py CHANGED
@@ -1,19 +1,18 @@
1
  from django.db import models
2
 
3
  # Create your models here.
4
-
 
5
  class EndPoint(models.Model):
6
  user_id = models.IntegerField()
7
  resume = models.TextField()
8
  job_description = models.TextField()
9
- time = models.DateTimeField(auto_now_add=True)
10
 
11
  def __str__(self):
12
  return {
13
- 'user_id':self.user_id,
14
- 'resume':self.resume,
15
- 'job_description':self.job_description,
16
- 'time':self.time
17
  }
18
-
19
-
 
1
  from django.db import models
2
 
3
  # Create your models here.
4
+
5
+
6
  class EndPoint(models.Model):
7
  user_id = models.IntegerField()
8
  resume = models.TextField()
9
  job_description = models.TextField()
10
+ time = models.DateTimeField(auto_now_add=True)
11
 
12
  def __str__(self):
13
  return {
14
+ "user_id": self.user_id,
15
+ "resume": self.resume,
16
+ "job_description": self.job_description,
17
+ "time": self.time,
18
  }
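The EndPoint model now records a timestamp via auto_now_add. Note that __str__ returns a dict rather than a string; returning a formatted string (or wrapping the dict in str()) would be the conventional Django approach. An illustrative sketch of creating a record, assuming migrations for the Process app are applied and the code runs inside a Django shell:

from Process.models import EndPoint

entry = EndPoint.objects.create(
    user_id=1,
    resume="resume text ...",
    job_description="job description ...",
)  # `time` is populated automatically by auto_now_add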
 
 
Process/response.py CHANGED
@@ -7,18 +7,23 @@ from google.genai import types
7
  load_dotenv()
8
 
9
 
 
10
 
11
- sys_instruct="Provide the output in JSON format where the key is the topic and the value is a list of relevant contents. Ensure the response is clear, user friendly, structured."
12
- def get_response(prompt,task):
13
  client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
14
 
15
  response = client.models.generate_content(
16
  model="gemini-2.0-flash",
17
  config=types.GenerateContentConfig(
18
- system_instruction=task+sys_instruct,response_mime_type='application/json',temperature=0.6),
19
- contents=prompt
 
 
 
20
  )
21
  # print(response.text)
22
  return response.text
23
 
24
- # get_response("What is AI?","explain the given prompt")
 
 
7
  load_dotenv()
8
 
9
 
10
+ sys_instruct = "Provide the output in JSON format where the key is the topic and the value is a list of relevant contents. Ensure the response is clear, user friendly, structured."
11
 
12
+
13
+ def get_response(prompt, task, temperature=0.75):
14
  client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
15
 
16
  response = client.models.generate_content(
17
  model="gemini-2.0-flash",
18
  config=types.GenerateContentConfig(
19
+ system_instruction=task + sys_instruct,
20
+ response_mime_type="application/json",
21
+ temperature=temperature,
22
+ ),
23
+ contents=prompt,
24
  )
25
  # print(response.text)
26
  return response.text
27
 
28
+
29
+ # get_response("What is AI?","explain the given prompt")
Process/urls.py CHANGED
@@ -3,9 +3,8 @@ from . import views
3
  from .change import process_change
4
 
5
  urlpatterns = [
6
- path("",views.home,name="welcome"),
7
- path('process_resume/', views.process_resume, name='handle_request'),
8
- path('process_change/', process_change, name="handle_change"),
9
- path('verify_api/', views.verify_api, name='verify_api'),
10
  ]
11
-
 
3
  from .change import process_change
4
 
5
  urlpatterns = [
6
+ path("", views.home, name="welcome"),
7
+ path("process_resume/", views.process_resume, name="handle_request"),
8
+ path("process_change/", process_change, name="handle_change"),
9
+ path("verify_api/", views.verify_api, name="verify_api"),
10
  ]
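The route names are unchanged; only quoting and spacing were normalized. A quick smoke-test sketch against a local dev server (illustrative; assumes the Process URLs are mounted at the project root on 127.0.0.1:8000):

import requests

print(requests.get("http://127.0.0.1:8000/").json())             # home -> "Welcome To Resume-ATS"
print(requests.get("http://127.0.0.1:8000/verify_api/").json())  # verify_api health check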
 
Process/utils.py CHANGED
@@ -19,16 +19,17 @@ Provide responses in this exact JSON format:
19
  Ensure the score is always a number between 0-10.
20
  """
21
 
 
22
  class ATSResumeParser:
23
  def __init__(self):
24
  logger.info("Initializing ATSResumeParser")
25
  self.score_weights = {
26
- 'skills_match': 30,
27
- 'experience_relevance': 25,
28
- 'education_relevance': 10,
29
- 'overall_formatting': 15,
30
- 'keyword_optimization': 10,
31
- 'extra_sections': 10
32
  }
33
  self.total_weight = sum(self.score_weights.values())
34
  logger.debug(f"Score weights configured with total weight: {self.total_weight}")
@@ -39,136 +40,174 @@ class ATSResumeParser:
39
  logger.debug("Parsing Gemini API response")
40
  response = json.loads(response_text)
41
  result = {
42
- 'score': float(response['score']),
43
- 'matching': response.get('matching_elements', []),
44
- 'missing': response.get('missing_elements', []),
45
- 'explanation': response.get('explanation', '')
46
  }
47
  logger.debug(f"Successfully parsed response with score: {result['score']}")
48
  return result
49
  except (json.JSONDecodeError, KeyError, ValueError) as e:
50
  logger.error(f"Error parsing Gemini response: {e}")
51
  logger.debug(f"Failed response content: {response_text}")
52
- return {'score': 5.0, 'matching': [], 'missing': [], 'explanation': ''}
53
  except Exception as e:
54
  logger.error(f"Unexpected error parsing Gemini response: {e}")
55
  logger.debug(traceback.format_exc())
56
- return {'score': 5.0, 'matching': [], 'missing': [], 'explanation': ''}
57
 
58
  def _score_skills(self, skills: List[str], job_description: Optional[str]) -> Dict:
59
  """Score skills with optimized processing"""
60
  if not skills:
61
- return {'score': 0, 'matching': [], 'missing': [], 'explanation': 'No skills provided'}
62
-
63
- base_score = 70
64
-
 
 
 
 
 
65
  skills_length = len(skills)
66
  if skills_length >= 5:
67
  base_score += 10
68
  if skills_length >= 10:
69
  base_score += 10
70
-
71
  if not job_description:
72
- return {'score': base_score, 'matching': skills, 'missing': [], 'explanation': 'No job description provided'}
 
 
 
 
 
73
 
74
- prompt = f"Skills: {','.join(skills[:20])}. Job description: {job_description[:500]}. Rate match."
75
-
76
- response = self._parse_gemini_response(
77
- get_response(prompt, SYSTEM_INSTRUCTION)
78
- )
 
 
 
79
  return {
80
- 'score': (base_score + (response['score'] * 10)) / 2,
81
- 'matching': response['matching'],
82
- 'missing': response['missing'],
83
- 'explanation': response['explanation']
84
  }
85
 
86
- def _score_experience(self, experience: List[Dict], job_description: Optional[str]) -> Dict:
 
 
87
  """Score experience with optimized processing"""
88
  if not experience:
89
- return {'score': 0, 'matching': [], 'missing': [], 'explanation': 'No experience provided'}
90
-
 
 
 
 
 
91
  base_score = 60
92
-
93
- required_keys = {'title', 'company', 'description'}
94
- improvement_keywords = {'increased', 'decreased', 'improved', '%', 'reduced'}
95
-
96
  for exp in experience:
97
  if required_keys.issubset(exp.keys()):
98
  base_score += 10
99
-
100
- description = exp.get('description', '')
101
- if description and any(keyword in description for keyword in improvement_keywords):
 
 
102
  base_score += 5
103
-
104
  if not job_description:
105
- return {'score': base_score, 'matching': [], 'missing': [], 'explanation': 'No job description provided'}
106
-
107
- simplified_exp = [{'title': e.get('title', ''), 'description': e.get('description', '')[:100]}
108
- for e in experience[:3]]
109
-
 
 
 
 
 
 
 
110
  prompt = f"Experience: {json.dumps(simplified_exp)}. Job description: {job_description[:500]}. Rate match."
111
-
112
- response = self._parse_gemini_response(
113
- get_response(prompt, SYSTEM_INSTRUCTION)
114
- )
115
  return {
116
- 'score': (base_score + (response['score'] * 10)) / 2,
117
- 'matching': response['matching'],
118
- 'missing': response['missing'],
119
- 'explanation': response['explanation']
120
  }
121
 
122
  def _score_education(self, education: List[Dict]) -> Dict:
123
  """Score education with optimized processing"""
124
  if not education:
125
- return {'score': 0, 'matching': [], 'missing': [], 'explanation': 'No education provided'}
126
-
 
 
 
 
 
127
  score = 70
128
  matching = []
129
-
130
- required_keys = {'institution', 'degree', 'start_date', 'end_date'}
131
-
132
  for edu in education:
133
- gpa = edu.get('gpa')
134
  if gpa and float(gpa) > 3.0:
135
  score += 10
136
  matching.append(f"Strong GPA: {gpa}")
137
-
138
  if required_keys.issubset(edu.keys()):
139
  score += 10
140
- matching.append(f"{edu.get('degree', '')} from {edu.get('institution', '')}")
141
-
 
 
142
  return {
143
- 'score': min(100, score),
144
- 'matching': matching,
145
- 'missing': [],
146
- 'explanation': 'Education assessment completed'
147
  }
148
 
149
  def _score_formatting(self, structured_data: Dict) -> Dict:
150
  """Score formatting with optimized processing"""
151
  score = 100
152
-
153
- contact_fields = ('name', 'email', 'phone')
154
- essential_sections = ('skills', 'experience', 'education')
155
-
156
  structured_keys = set(structured_data.keys())
157
-
158
- missing_contacts = [field for field in contact_fields if field not in structured_keys]
 
 
159
  if missing_contacts:
160
  score -= 20
161
-
162
- missing_sections = [section for section in essential_sections if section not in structured_keys]
 
 
163
  missing_penalty = 15 * len(missing_sections)
164
  if missing_sections:
165
  score -= missing_penalty
166
-
167
  return {
168
- 'score': max(0, score),
169
- 'matching': [field for field in contact_fields if field in structured_keys],
170
- 'missing': missing_contacts + missing_sections,
171
- 'explanation': 'Format assessment completed'
172
  }
173
 
174
  def _score_extra(self, structured_data: Dict) -> Dict:
@@ -182,102 +221,125 @@ class ATSResumeParser:
182
  "patents": 15,
183
  "professional_affiliations": 10,
184
  "portfolio_links": 10,
185
- "summary_or_objective": 10
186
  }
187
-
188
  total_possible = sum(extra_sections.values())
189
-
190
  structured_keys = set(structured_data.keys())
191
-
192
  score = 0
193
  matching = []
194
  missing = []
195
-
196
  for section, weight in extra_sections.items():
197
  if section in structured_keys and structured_data.get(section):
198
  score += weight
199
- matching.append(section.replace('_', ' ').title())
200
  else:
201
- missing.append(section.replace('_', ' ').title())
202
-
203
  normalized_score = (score * 100) // total_possible if total_possible > 0 else 0
204
-
205
  return {
206
- 'score': normalized_score,
207
- 'matching': matching,
208
- 'missing': missing,
209
- 'explanation': 'Additional sections assessment completed'
210
  }
211
 
212
- def parse_and_score(self, structured_data: Dict, job_description: Optional[str] = None) -> Dict:
 
 
213
  """Parse and score resume with parallel processing"""
214
  scores = {}
215
- feedback = {'strengths': [], 'improvements': []}
216
  detailed_feedback = {}
217
-
218
  with concurrent.futures.ThreadPoolExecutor() as executor:
219
  # Define tasks to run in parallel
220
  tasks = {
221
- 'skills_match': executor.submit(self._score_skills, structured_data.get('skills', []), job_description),
222
- 'experience_relevance': executor.submit(self._score_experience, structured_data.get('experience', []), job_description),
223
- 'education_relevance': executor.submit(self._score_education, structured_data.get('education', [])),
224
- 'overall_formatting': executor.submit(self._score_formatting, structured_data),
225
- 'extra_sections': executor.submit(self._score_extra, structured_data)
 
 
 
 
 
 
 
 
 
 
 
 
226
  }
227
-
228
  total_score = 0
229
  for category, future in tasks.items():
230
  result = future.result()
231
-
232
- scores[category] = result['score']
233
-
234
  weight = self.score_weights[category] / 100
235
- total_score += result['score'] * weight
236
-
237
  detailed_feedback[category] = {
238
- 'matching_elements': result['matching'],
239
- 'missing_elements': result['missing'],
240
- 'explanation': result['explanation']
241
  }
242
-
243
- if result['score'] >= 80:
244
- feedback['strengths'].append(f"Strong {category.replace('_', ' ')}")
245
- elif result['score'] < 60:
246
- feedback['improvements'].append(f"Improve {category.replace('_', ' ')}")
247
-
 
 
248
  return {
249
- 'total_score': round(total_score, 2),
250
- 'detailed_scores': scores,
251
- 'feedback': feedback,
252
- 'detailed_feedback': detailed_feedback
253
  }
254
 
255
- def generate_ats_score(structured_data: Union[Dict, str], job_des_text: Optional[str] = None) -> Dict:
 
 
 
256
  """Generate ATS score with optimized processing"""
257
  try:
258
  logger.info("Starting ATS score generation")
259
  if not structured_data:
260
  return {"error": "No resume data provided"}
261
-
262
  if isinstance(structured_data, str):
263
  try:
264
  structured_data = json.loads(structured_data)
265
  except json.JSONDecodeError:
266
  return {"error": "Invalid JSON format in resume data"}
267
-
268
  parser = ATSResumeParser()
269
  result = parser.parse_and_score(structured_data, job_des_text)
270
-
271
  logger.info("ATS score generation completed successfully")
272
  return {
273
- 'ats_score': result['total_score'],
274
- 'detailed_scores': result['detailed_scores'],
275
- 'feedback': result['feedback'],
276
- 'detailed_feedback': result['detailed_feedback']
277
  }
278
-
279
  except Exception as e:
280
  error_msg = f"Error generating ATS score: {e}"
281
  logger.error(error_msg)
282
  logger.debug(traceback.format_exc())
283
- return {"ats_score": 50.0, "detailed_scores": {}, "feedback": {"error": error_msg}}
 
 
 
 
 
19
  Ensure the score is always a number between 0-10.
20
  """
21
 
22
+
23
  class ATSResumeParser:
24
  def __init__(self):
25
  logger.info("Initializing ATSResumeParser")
26
  self.score_weights = {
27
+ "skills_match": 30,
28
+ "experience_relevance": 25,
29
+ "education_relevance": 10,
30
+ "overall_formatting": 15,
31
+ "keyword_optimization": 10,
32
+ "extra_sections": 10,
33
  }
34
  self.total_weight = sum(self.score_weights.values())
35
  logger.debug(f"Score weights configured with total weight: {self.total_weight}")
 
40
  logger.debug("Parsing Gemini API response")
41
  response = json.loads(response_text)
42
  result = {
43
+ "score": float(response["score"]),
44
+ "matching": response.get("matching_elements", []),
45
+ "missing": response.get("missing_elements", []),
46
+ "explanation": response.get("explanation", ""),
47
  }
48
  logger.debug(f"Successfully parsed response with score: {result['score']}")
49
  return result
50
  except (json.JSONDecodeError, KeyError, ValueError) as e:
51
  logger.error(f"Error parsing Gemini response: {e}")
52
  logger.debug(f"Failed response content: {response_text}")
53
+ return {"score": 5.0, "matching": [], "missing": [], "explanation": ""}
54
  except Exception as e:
55
  logger.error(f"Unexpected error parsing Gemini response: {e}")
56
  logger.debug(traceback.format_exc())
57
+ return {"score": 5.0, "matching": [], "missing": [], "explanation": ""}
58
 
59
  def _score_skills(self, skills: List[str], job_description: Optional[str]) -> Dict:
60
  """Score skills with optimized processing"""
61
  if not skills:
62
+ return {
63
+ "score": 0,
64
+ "matching": [],
65
+ "missing": [],
66
+ "explanation": "No skills provided",
67
+ }
68
+
69
+ base_score = 70
70
+
71
  skills_length = len(skills)
72
  if skills_length >= 5:
73
  base_score += 10
74
  if skills_length >= 10:
75
  base_score += 10
76
+
77
  if not job_description:
78
+ return {
79
+ "score": base_score,
80
+ "matching": skills,
81
+ "missing": [],
82
+ "explanation": "No job description provided",
83
+ }
84
 
85
+ prompt = f"""Skills: {','.join(skills[:20])}.
86
+ Job description: {job_description[:500]}.
87
+ Evaluate the skills match against this job description.
88
+ In the 'matching_elements' list, include only skills that directly match the job requirements.
89
+ In the 'missing_elements' list, include ONLY specific missing skills from the job description (no paragraphs or lengthy text).
90
+ Rate the overall match on a scale of 0-10."""
91
+
92
+ response = self._parse_gemini_response(get_response(prompt, SYSTEM_INSTRUCTION))
93
  return {
94
+ "score": (base_score + (response["score"] * 10)) / 2,
95
+ "matching": response["matching"],
96
+ "missing": response["missing"],
97
+ "explanation": response["explanation"],
98
  }
99
 
100
+ def _score_experience(
101
+ self, experience: List[Dict], job_description: Optional[str]
102
+ ) -> Dict:
103
  """Score experience with optimized processing"""
104
  if not experience:
105
+ return {
106
+ "score": 0,
107
+ "matching": [],
108
+ "missing": [],
109
+ "explanation": "No experience provided",
110
+ }
111
+
112
  base_score = 60
113
+
114
+ required_keys = {"title", "company", "description"}
115
+ improvement_keywords = {"increased", "decreased", "improved", "%", "reduced"}
116
+
117
  for exp in experience:
118
  if required_keys.issubset(exp.keys()):
119
  base_score += 10
120
+
121
+ description = exp.get("description", "")
122
+ if description and any(
123
+ keyword in description for keyword in improvement_keywords
124
+ ):
125
  base_score += 5
126
+
127
  if not job_description:
128
+ return {
129
+ "score": base_score,
130
+ "matching": [],
131
+ "missing": [],
132
+ "explanation": "No job description provided",
133
+ }
134
+
135
+ simplified_exp = [
136
+ {"title": e.get("title", ""), "description": e.get("description", "")[:100]}
137
+ for e in experience[:3]
138
+ ]
139
+
140
  prompt = f"Experience: {json.dumps(simplified_exp)}. Job description: {job_description[:500]}. Rate match."
141
+
142
+ response = self._parse_gemini_response(get_response(prompt, SYSTEM_INSTRUCTION))
 
 
143
  return {
144
+ "score": (base_score + (response["score"] * 10)) / 2,
145
+ "matching": response["matching"],
146
+ "missing": response["missing"],
147
+ "explanation": response["explanation"],
148
  }
149
 
150
  def _score_education(self, education: List[Dict]) -> Dict:
151
  """Score education with optimized processing"""
152
  if not education:
153
+ return {
154
+ "score": 0,
155
+ "matching": [],
156
+ "missing": [],
157
+ "explanation": "No education provided",
158
+ }
159
+
160
  score = 70
161
  matching = []
162
+
163
+ required_keys = {"institution", "degree", "start_date", "end_date"}
164
+
165
  for edu in education:
166
+ gpa = edu.get("gpa")
167
  if gpa and float(gpa) > 3.0:
168
  score += 10
169
  matching.append(f"Strong GPA: {gpa}")
170
+
171
  if required_keys.issubset(edu.keys()):
172
  score += 10
173
+ matching.append(
174
+ f"{edu.get('degree', '')} from {edu.get('institution', '')}"
175
+ )
176
+
177
  return {
178
+ "score": min(100, score),
179
+ "matching": matching,
180
+ "missing": [],
181
+ "explanation": "Education assessment completed",
182
  }
183
 
184
  def _score_formatting(self, structured_data: Dict) -> Dict:
185
  """Score formatting with optimized processing"""
186
  score = 100
187
+
188
+ contact_fields = ("name", "email", "phone")
189
+ essential_sections = ("skills", "experience", "education")
190
+
191
  structured_keys = set(structured_data.keys())
192
+
193
+ missing_contacts = [
194
+ field for field in contact_fields if field not in structured_keys
195
+ ]
196
  if missing_contacts:
197
  score -= 20
198
+
199
+ missing_sections = [
200
+ section for section in essential_sections if section not in structured_keys
201
+ ]
202
  missing_penalty = 15 * len(missing_sections)
203
  if missing_sections:
204
  score -= missing_penalty
205
+
206
  return {
207
+ "score": max(0, score),
208
+ "matching": [field for field in contact_fields if field in structured_keys],
209
+ "missing": missing_contacts + missing_sections,
210
+ "explanation": "Format assessment completed",
211
  }
212
 
213
  def _score_extra(self, structured_data: Dict) -> Dict:
 
221
  "patents": 15,
222
  "professional_affiliations": 10,
223
  "portfolio_links": 10,
224
+ "summary_or_objective": 10,
225
  }
226
+
227
  total_possible = sum(extra_sections.values())
228
+
229
  structured_keys = set(structured_data.keys())
230
+
231
  score = 0
232
  matching = []
233
  missing = []
234
+
235
  for section, weight in extra_sections.items():
236
  if section in structured_keys and structured_data.get(section):
237
  score += weight
238
+ matching.append(section.replace("_", " ").title())
239
  else:
240
+ missing.append(section.replace("_", " ").title())
241
+
242
  normalized_score = (score * 100) // total_possible if total_possible > 0 else 0
243
+
244
  return {
245
+ "score": normalized_score,
246
+ "matching": matching,
247
+ "missing": missing,
248
+ "explanation": "Additional sections assessment completed",
249
  }
250
 
251
+ def parse_and_score(
252
+ self, structured_data: Dict, job_description: Optional[str] = None
253
+ ) -> Dict:
254
  """Parse and score resume with parallel processing"""
255
  scores = {}
256
+ feedback = {"strengths": [], "improvements": []}
257
  detailed_feedback = {}
258
+
259
  with concurrent.futures.ThreadPoolExecutor() as executor:
260
  # Define tasks to run in parallel
261
  tasks = {
262
+ "skills_match": executor.submit(
263
+ self._score_skills,
264
+ structured_data.get("skills", []),
265
+ job_description,
266
+ ),
267
+ "experience_relevance": executor.submit(
268
+ self._score_experience,
269
+ structured_data.get("experience", []),
270
+ job_description,
271
+ ),
272
+ "education_relevance": executor.submit(
273
+ self._score_education, structured_data.get("education", [])
274
+ ),
275
+ "overall_formatting": executor.submit(
276
+ self._score_formatting, structured_data
277
+ ),
278
+ "extra_sections": executor.submit(self._score_extra, structured_data),
279
  }
280
+
281
  total_score = 0
282
  for category, future in tasks.items():
283
  result = future.result()
284
+
285
+ scores[category] = result["score"]
286
+
287
  weight = self.score_weights[category] / 100
288
+ total_score += result["score"] * weight
289
+
290
  detailed_feedback[category] = {
291
+ "matching_elements": result["matching"],
292
+ "missing_elements": result["missing"],
293
+ "explanation": result["explanation"],
294
  }
295
+
296
+ if result["score"] >= 80:
297
+ feedback["strengths"].append(f"Strong {category.replace('_', ' ')}")
298
+ elif result["score"] < 60:
299
+ feedback["improvements"].append(
300
+ f"Improve {category.replace('_', ' ')}"
301
+ )
302
+
303
  return {
304
+ "total_score": round(total_score, 2),
305
+ "detailed_scores": scores,
306
+ "feedback": feedback,
307
+ "detailed_feedback": detailed_feedback,
308
  }
309
 
310
+
311
+ def generate_ats_score(
312
+ structured_data: Union[Dict, str], job_des_text: Optional[str] = None
313
+ ) -> Dict:
314
  """Generate ATS score with optimized processing"""
315
  try:
316
  logger.info("Starting ATS score generation")
317
  if not structured_data:
318
  return {"error": "No resume data provided"}
319
+
320
  if isinstance(structured_data, str):
321
  try:
322
  structured_data = json.loads(structured_data)
323
  except json.JSONDecodeError:
324
  return {"error": "Invalid JSON format in resume data"}
325
+
326
  parser = ATSResumeParser()
327
  result = parser.parse_and_score(structured_data, job_des_text)
328
+
329
  logger.info("ATS score generation completed successfully")
330
  return {
331
+ "ats_score": result["total_score"],
332
+ "detailed_scores": result["detailed_scores"],
333
+ "feedback": result["feedback"],
334
+ "detailed_feedback": result["detailed_feedback"],
335
  }
336
+
337
  except Exception as e:
338
  error_msg = f"Error generating ATS score: {e}"
339
  logger.error(error_msg)
340
  logger.debug(traceback.format_exc())
341
+ return {
342
+ "ats_score": 50.0,
343
+ "detailed_scores": {},
344
+ "feedback": {"error": error_msg},
345
+ }
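This appears to be the change the commit message refers to: the skills prompt now instructs Gemini to return only concrete missing skills in missing_elements rather than free-form text. A usage sketch of the scoring entry point (illustrative, not part of this commit; the resume fields below are placeholders, not a guaranteed schema, and the call triggers a live Gemini request):

from Process.utils import generate_ats_score

structured_data = {
    "name": "John Doe",
    "email": "john@example.com",
    "skills": ["Python", "Django", "SQL"],
    "experience": [{"title": "Backend Intern", "company": "Acme",
                    "description": "Improved API latency by 30%"}],
    "education": [{"institution": "XYZ University", "degree": "B.Tech",
                   "start_date": "2020", "end_date": "2024", "gpa": "3.4"}],
}
result = generate_ats_score(structured_data, "Backend developer with Python and Django")
print(result["ats_score"])
print(result["detailed_feedback"]["skills_match"]["missing_elements"])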
Process/views.py CHANGED
@@ -26,6 +26,7 @@ except Exception as e:
26
  logger.error(f"Failed to load model: {e}")
27
  logger.debug(traceback.format_exc())
28
 
 
29
  def get_embeddings(texts):
30
  try:
31
  logger.debug(f"Generating embeddings for {len(texts)} texts")
@@ -39,6 +40,7 @@ def get_embeddings(texts):
39
  logger.debug(traceback.format_exc())
40
  return None
41
 
 
42
  def calculate_similarity(job_description, resume_text):
43
  try:
44
  logger.info("Calculating similarity between job description and resume")
@@ -54,21 +56,22 @@ def calculate_similarity(job_description, resume_text):
54
  logger.debug(traceback.format_exc())
55
  return 0.0
56
 
 
57
  @csrf_exempt
58
  def process_resume(request):
59
- if request.method == 'POST':
60
  try:
61
  logger.info("Processing resume request")
62
  data = json.loads(request.body)
63
 
64
- user_id = data.get('user_id')
65
- file_link = data.get('file_link')
66
- job_description = data.get('job_description')
67
  logger.info(f"Received data for user_id: {user_id}")
68
-
69
  if not all([user_id, file_link, job_description]):
70
  logger.warning("Missing required fields in request")
71
- return JsonResponse({'error': 'Missing required fields'}, status=400)
72
 
73
  logger.info("Extracting Text from the pdf")
74
  resume = extract_text_from_pdf(file_link)
@@ -83,37 +86,39 @@ def process_resume(request):
83
  logger.info("ATS score generation completed")
84
 
85
  response_data = {
86
- 'user_id': user_id,
87
- 'similarity': "100.00",
88
- 'ats_score': ats_score,
89
- 'structured_data': st_data
90
  }
91
  logger.info("Sending successful response")
92
  return JsonResponse(response_data, status=200)
93
  except json.JSONDecodeError as e:
94
  logger.error(f"Invalid JSON received: {e}")
95
- return JsonResponse({'error': 'Invalid JSON format'}, status=400)
96
  except Exception as e:
97
  error_msg = f"Error processing resume: {e}"
98
  logger.error(error_msg)
99
  logger.debug(traceback.format_exc())
100
- return JsonResponse({'error': error_msg}, status=500)
101
  else:
102
  logger.warning(f"Unsupported method: {request.method}")
103
- return JsonResponse({'message': 'Only POST requests are allowed'}, status=405)
 
104
 
105
  def verify_api(request):
106
  logger.info(f"API verification request received via {request.method}")
107
- if request.method == 'GET':
108
- return JsonResponse({'message': 'yaay working-GET '}, status=200)
109
  else:
110
  logger.warning(f"Unsupported method for API verification: {request.method}")
111
- return JsonResponse({'error': 'Only GET requests are allowed'}, status=405)
 
112
 
113
  def home(request):
114
  logger.info(f"Home request received via {request.method}")
115
- if request.method == 'GET':
116
- return JsonResponse({'message': 'Welcome To Resume-ATS'}, status=200)
117
  else:
118
  logger.warning(f"Unsupported method for home: {request.method}")
119
- return JsonResponse({'error': 'Only GET requests are allowed'}, status=405)
 
26
  logger.error(f"Failed to load model: {e}")
27
  logger.debug(traceback.format_exc())
28
 
29
+
30
  def get_embeddings(texts):
31
  try:
32
  logger.debug(f"Generating embeddings for {len(texts)} texts")
 
40
  logger.debug(traceback.format_exc())
41
  return None
42
 
43
+
44
  def calculate_similarity(job_description, resume_text):
45
  try:
46
  logger.info("Calculating similarity between job description and resume")
 
56
  logger.debug(traceback.format_exc())
57
  return 0.0
58
 
59
+
60
  @csrf_exempt
61
  def process_resume(request):
62
+ if request.method == "POST":
63
  try:
64
  logger.info("Processing resume request")
65
  data = json.loads(request.body)
66
 
67
+ user_id = data.get("user_id")
68
+ file_link = data.get("file_link")
69
+ job_description = data.get("job_description")
70
  logger.info(f"Received data for user_id: {user_id}")
71
+
72
  if not all([user_id, file_link, job_description]):
73
  logger.warning("Missing required fields in request")
74
+ return JsonResponse({"error": "Missing required fields"}, status=400)
75
 
76
  logger.info("Extracting Text from the pdf")
77
  resume = extract_text_from_pdf(file_link)
 
86
  logger.info("ATS score generation completed")
87
 
88
  response_data = {
89
+ "user_id": user_id,
90
+ "similarity": "100.00",
91
+ "ats_score": ats_score,
92
+ "structured_data": st_data,
93
  }
94
  logger.info("Sending successful response")
95
  return JsonResponse(response_data, status=200)
96
  except json.JSONDecodeError as e:
97
  logger.error(f"Invalid JSON received: {e}")
98
+ return JsonResponse({"error": "Invalid JSON format"}, status=400)
99
  except Exception as e:
100
  error_msg = f"Error processing resume: {e}"
101
  logger.error(error_msg)
102
  logger.debug(traceback.format_exc())
103
+ return JsonResponse({"error": error_msg}, status=500)
104
  else:
105
  logger.warning(f"Unsupported method: {request.method}")
106
+ return JsonResponse({"message": "Only POST requests are allowed"}, status=405)
107
+
108
 
109
  def verify_api(request):
110
  logger.info(f"API verification request received via {request.method}")
111
+ if request.method == "GET":
112
+ return JsonResponse({"message": "yaay working-GET "}, status=200)
113
  else:
114
  logger.warning(f"Unsupported method for API verification: {request.method}")
115
+ return JsonResponse({"error": "Only GET requests are allowed"}, status=405)
116
+
117
 
118
  def home(request):
119
  logger.info(f"Home request received via {request.method}")
120
+ if request.method == "GET":
121
+ return JsonResponse({"message": "Welcome To Resume-ATS"}, status=200)
122
  else:
123
  logger.warning(f"Unsupported method for home: {request.method}")
124
+ return JsonResponse({"error": "Only GET requests are allowed"}, status=405)
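An illustrative request sketch for the POST endpoint (not part of this commit); the host and file_link values are placeholders, and the linked PDF must be reachable by the server:

import requests

payload = {
    "user_id": 1,
    "file_link": "https://example.com/resume.pdf",
    "job_description": "Backend developer with Python and Django experience",
}
resp = requests.post("http://127.0.0.1:8000/process_resume/", json=payload)
print(resp.status_code, resp.json())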
ResumeATS/settings.py CHANGED
@@ -13,6 +13,7 @@ https://docs.djangoproject.com/en/5.1/ref/settings/
13
  from pathlib import Path
14
  from dotenv import load_dotenv
15
  import os
 
16
  load_dotenv()
17
  # Build paths inside the project like this: BASE_DIR / 'subdir'.
18
  BASE_DIR = Path(__file__).resolve().parent.parent
@@ -27,60 +28,60 @@ SECRET_KEY = os.getenv("SECRET_KEY")
27
  # SECURITY WARNING: don't run with debug turned on in production!
28
  DEBUG = False
29
 
30
- ALLOWED_HOSTS = ["127.0.0.1","harish20205-resume-ats.hf.space"]
31
 
32
 
33
  # Application definition
34
 
35
  INSTALLED_APPS = [
36
- 'django.contrib.admin',
37
- 'django.contrib.auth',
38
- 'django.contrib.contenttypes',
39
- 'django.contrib.sessions',
40
- 'django.contrib.messages',
41
- 'django.contrib.staticfiles',
42
- 'rest_framework',
43
- 'Process',
44
  ]
45
 
46
  MIDDLEWARE = [
47
- 'django.middleware.security.SecurityMiddleware',
48
- 'django.contrib.sessions.middleware.SessionMiddleware',
49
- 'django.middleware.common.CommonMiddleware',
50
- 'django.middleware.csrf.CsrfViewMiddleware',
51
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
52
- 'django.contrib.messages.middleware.MessageMiddleware',
53
- 'django.middleware.clickjacking.XFrameOptionsMiddleware',
54
  ]
55
 
56
- ROOT_URLCONF = 'ResumeATS.urls'
57
 
58
  TEMPLATES = [
59
  {
60
- 'BACKEND': 'django.template.backends.django.DjangoTemplates',
61
- 'DIRS': [],
62
- 'APP_DIRS': True,
63
- 'OPTIONS': {
64
- 'context_processors': [
65
- 'django.template.context_processors.debug',
66
- 'django.template.context_processors.request',
67
- 'django.contrib.auth.context_processors.auth',
68
- 'django.contrib.messages.context_processors.messages',
69
  ],
70
  },
71
  },
72
  ]
73
 
74
- WSGI_APPLICATION = 'ResumeATS.wsgi.application'
75
 
76
 
77
  # Database
78
  # https://docs.djangoproject.com/en/5.1/ref/settings/#databases
79
 
80
  DATABASES = {
81
- 'default': {
82
- 'ENGINE': 'django.db.backends.sqlite3',
83
- 'NAME': BASE_DIR / 'db.sqlite3',
84
  }
85
  }
86
 
@@ -90,16 +91,16 @@ DATABASES = {
90
 
91
  AUTH_PASSWORD_VALIDATORS = [
92
  {
93
- 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
94
  },
95
  {
96
- 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
97
  },
98
  {
99
- 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
100
  },
101
  {
102
- 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
103
  },
104
  ]
105
 
@@ -107,9 +108,9 @@ AUTH_PASSWORD_VALIDATORS = [
107
  # Internationalization
108
  # https://docs.djangoproject.com/en/5.1/topics/i18n/
109
 
110
- LANGUAGE_CODE = 'en-us'
111
 
112
- TIME_ZONE = 'UTC'
113
 
114
  USE_I18N = True
115
 
@@ -119,9 +120,9 @@ USE_TZ = True
119
  # Static files (CSS, JavaScript, Images)
120
  # https://docs.djangoproject.com/en/5.1/howto/static-files/
121
 
122
- STATIC_URL = 'static/'
123
 
124
  # Default primary key field type
125
  # https://docs.djangoproject.com/en/5.1/ref/settings/#default-auto-field
126
 
127
- DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
 
13
  from pathlib import Path
14
  from dotenv import load_dotenv
15
  import os
16
+
17
  load_dotenv()
18
  # Build paths inside the project like this: BASE_DIR / 'subdir'.
19
  BASE_DIR = Path(__file__).resolve().parent.parent
 
28
  # SECURITY WARNING: don't run with debug turned on in production!
29
  DEBUG = False
30
 
31
+ ALLOWED_HOSTS = ["127.0.0.1", "harish20205-resume-ats.hf.space"]
32
 
33
 
34
  # Application definition
35
 
36
  INSTALLED_APPS = [
37
+ "django.contrib.admin",
38
+ "django.contrib.auth",
39
+ "django.contrib.contenttypes",
40
+ "django.contrib.sessions",
41
+ "django.contrib.messages",
42
+ "django.contrib.staticfiles",
43
+ "rest_framework",
44
+ "Process",
45
  ]
46
 
47
  MIDDLEWARE = [
48
+ "django.middleware.security.SecurityMiddleware",
49
+ "django.contrib.sessions.middleware.SessionMiddleware",
50
+ "django.middleware.common.CommonMiddleware",
51
+ "django.middleware.csrf.CsrfViewMiddleware",
52
+ "django.contrib.auth.middleware.AuthenticationMiddleware",
53
+ "django.contrib.messages.middleware.MessageMiddleware",
54
+ "django.middleware.clickjacking.XFrameOptionsMiddleware",
55
  ]
56
 
57
+ ROOT_URLCONF = "ResumeATS.urls"
58
 
59
  TEMPLATES = [
60
  {
61
+ "BACKEND": "django.template.backends.django.DjangoTemplates",
62
+ "DIRS": [],
63
+ "APP_DIRS": True,
64
+ "OPTIONS": {
65
+ "context_processors": [
66
+ "django.template.context_processors.debug",
67
+ "django.template.context_processors.request",
68
+ "django.contrib.auth.context_processors.auth",
69
+ "django.contrib.messages.context_processors.messages",
70
  ],
71
  },
72
  },
73
  ]
74
 
75
+ WSGI_APPLICATION = "ResumeATS.wsgi.application"
76
 
77
 
78
  # Database
79
  # https://docs.djangoproject.com/en/5.1/ref/settings/#databases
80
 
81
  DATABASES = {
82
+ "default": {
83
+ "ENGINE": "django.db.backends.sqlite3",
84
+ "NAME": BASE_DIR / "db.sqlite3",
85
  }
86
  }
87
 
 
91
 
92
  AUTH_PASSWORD_VALIDATORS = [
93
  {
94
+ "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
95
  },
96
  {
97
+ "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
98
  },
99
  {
100
+ "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
101
  },
102
  {
103
+ "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
104
  },
105
  ]
106
 
 
108
  # Internationalization
109
  # https://docs.djangoproject.com/en/5.1/topics/i18n/
110
 
111
+ LANGUAGE_CODE = "en-us"
112
 
113
+ TIME_ZONE = "UTC"
114
 
115
  USE_I18N = True
116
 
 
120
  # Static files (CSS, JavaScript, Images)
121
  # https://docs.djangoproject.com/en/5.1/howto/static-files/
122
 
123
+ STATIC_URL = "static/"
124
 
125
  # Default primary key field type
126
  # https://docs.djangoproject.com/en/5.1/ref/settings/#default-auto-field
127
 
128
+ DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
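The settings changes are quote and spacing normalization. A small sanity-check sketch for the environment the project relies on (illustrative; SECRET_KEY comes from settings.py and GEMINI_API_KEY from Process/response.py):

import os
from dotenv import load_dotenv

load_dotenv()
assert os.getenv("SECRET_KEY"), "SECRET_KEY must be set in .env"
assert os.getenv("GEMINI_API_KEY"), "GEMINI_API_KEY must be set in .env (used by Process/response.py)"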