|
import os
import re
import tempfile

import gradio as gr
import pandas as pd
import pdfplumber
import torch
from transformers import pipeline
|
|
|
|
|
# Token-classification (NER) pipeline used to pull entities out of resume
# text; PyTorch backend with a CoNLL-03 English BERT model.
_NER_MODEL = "dbmdz/bert-large-cased-finetuned-conll03-english"
nlp = pipeline("ner", model=_NER_MODEL, framework="pt")
|
|
|
def extract_text_from_pdf(pdf_file):
    """Extract all text from an uploaded PDF resume.

    Args:
        pdf_file: A path or file-like object accepted by ``pdfplumber.open``.

    Returns:
        str: The concatenated text of every page. Pages with no extractable
        text layer (e.g. scanned images) contribute nothing.
    """
    text = ""
    with pdfplumber.open(pdf_file) as pdf:
        for page in pdf.pages:
            # extract_text() returns None for pages without a text layer;
            # the original `text += page.extract_text()` raised TypeError
            # on such pages. Coerce None to the empty string.
            text += page.extract_text() or ""
    return text
|
|
|
def parse_resume(resume_text): |
|
"""Parse the resume and extract details like name, email, phone, and skills.""" |
|
|
|
phone_pattern = r'\(?\d{3}\)?[-.\s]?\d{3}[-.\s]?\d{4}' |
|
email_pattern = r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}' |
|
|
|
|
|
phone = re.findall(phone_pattern, resume_text) |
|
email = re.findall(email_pattern, resume_text) |
|
|
|
|
|
entities = nlp(resume_text) |
|
skills = [entity['word'] for entity in entities if 'MISC' in entity['entity']] |
|
|
|
|
|
parsed_data = { |
|
"Phone": phone[0] if phone else "Not found", |
|
"Email": email[0] if email else "Not found", |
|
"Skills": ", ".join(skills), |
|
} |
|
|
|
return parsed_data |
|
|
|
def process_resumes(pdf_files):
    """Parse each uploaded PDF resume and write the results to one Excel file.

    Args:
        pdf_files: Iterable of uploaded PDF files (paths or file objects,
            as supplied by the Gradio ``File`` input).

    Returns:
        str: Filesystem path of the generated ``.xlsx`` file.
    """
    all_parsed_data = []
    for pdf_file in pdf_files:
        resume_text = extract_text_from_pdf(pdf_file)
        all_parsed_data.append(parse_resume(resume_text))

    df = pd.DataFrame(all_parsed_data)

    # Write to a unique temp file instead of the fixed "parsed_resumes.xlsx":
    # concurrent Gradio requests would otherwise overwrite each other's
    # output (and the CWD may not even be writable in deployment).
    fd, output_file = tempfile.mkstemp(prefix="parsed_resumes_", suffix=".xlsx")
    os.close(fd)  # pandas opens the path itself; release the raw descriptor
    df.to_excel(output_file, index=False)

    return output_file
|
|
|
|
|
# Wire up the web UI: multiple PDF uploads in, one Excel download out.
demo = gr.Interface(
    fn=process_resumes,
    inputs=gr.File(file_count="multiple", label="Upload Resumes (PDFs)"),
    outputs=gr.File(label="Download Parsed Data (Excel)"),
    title="AI Resume Parser",
    description="Upload multiple resumes (PDFs) to extract details like Name, Email, Phone, and Skills. The results will be saved in an Excel file.",
)

demo.launch()
|
|