# cv_job/app.py
import json

import gradio as gr
from llama_cpp import Llama
from transformers import pipeline
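
# Assumed dependencies: gradio, llama-cpp-python (which provides the llama_cpp
# module), and transformers with a backend such as torch for the summarization pipeline.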

# Load the SmolLM2 instruct model (8-bit GGUF quantization) from the Hugging Face Hub
try:
    llm = Llama.from_pretrained(
        repo_id="HuggingFaceTB/SmolLM2-360M-Instruct-GGUF",
        filename="smollm2-360m-instruct-q8_0.gguf",  # GGUF file inside the Hub repo
    )
except Exception as e:
    raise RuntimeError(f"Failed to load model: {e}")

# Load summarization model
summarizer = pipeline("summarization")
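# Note: with no model argument, pipeline("summarization") falls back to a default
# checkpoint (a distilbart-cnn variant at the time of writing); pin a specific model
# for reproducible results.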

# Summarize text to fit within token limits
def summarize_text(text, max_length=100):
    # Use the summarizer to condense the text
    summary = summarizer(text, max_length=max_length, min_length=25, do_sample=False)
    return summary[0]["summary_text"]

# Match a CV against one or more job descriptions, collecting debug information
def match_cv_to_jobs(cv_text, job_descriptions):
    debug_info = "Debug Info:\n"
    results = []

    # Summarize the CV and each job description to keep prompts within token limits
    summarized_cv = summarize_text(cv_text, max_length=400)
    debug_info += f"Summarized CV Text: {summarized_cv}\n"

    # One job description per non-empty line
    descriptions = [d.strip() for d in job_descriptions.strip().split("\n") if d.strip()]
    for description in descriptions:
        summarized_description = summarize_text(description, max_length=100)
        debug_info += f"\nSummarized Job Description: {summarized_description}\n"

        # Prompt comparing the summarized CV with the summarized job description
        prompt = (
            f"Compare the following job description with this resume. Job Description: {summarized_description}. "
            f"Resume: {summarized_cv}. Provide a match score and a brief analysis."
        )
        debug_info += f"\nGenerated Prompt: {prompt}\n"

        # Generate a response from the model
        try:
            response = llm.create_chat_completion(
                messages=[
                    {"role": "user", "content": prompt}
                ]
            )
            # Extract the analysis text
            response_content = response["choices"][0]["message"]["content"]
            debug_info += f"Model Response: {response_content}\n"

            # Try to parse the response as JSON; fall back to the raw text otherwise
            try:
                response_data = json.loads(response_content)
                results.append(response_data)
            except json.JSONDecodeError:
                results.append({
                    "Job Description": description,
                    "Analysis": response_content
                })
        except Exception as e:
            debug_info += f"Error: {str(e)}\n"
            results.append({"Job Description": description, "Error": str(e)})

    return results, debug_info
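
# Illustrative return shape (hypothetical values):
#   ([{"Job Description": "Senior Python developer ...",
#      "Analysis": "Match score: 7/10 ..."}],
#    "Debug Info:\n...")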

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# CV and Job Description Matcher with Summarization and Debugging")

    # Input fields for CV and job descriptions
    cv_text = gr.Textbox(label="CV Text", placeholder="Enter the CV text here", lines=10)
    job_descriptions = gr.Textbox(label="Job Descriptions (one per line)", placeholder="Enter each job description on a new line", lines=5)

    # Button and output areas
    match_button = gr.Button("Match CV to Job Descriptions")
    output = gr.JSON(label="Match Results")
    debug_output = gr.Textbox(label="Debug Info", lines=10)  # Debug box to display debug info

    # Wire the button click to the matching function
    match_button.click(fn=match_cv_to_jobs, inputs=[cv_text, job_descriptions], outputs=[output, debug_output])

demo.launch()
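
# Usage sketch (assumes a local environment with the dependencies above installed):
#   pip install gradio llama-cpp-python transformers torch
#   python app.py
# Gradio serves the UI at http://127.0.0.1:7860 by default.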