File size: 2,950 Bytes
9208e17
5716ab8
e079d59
b397dc0
e079d59
725f549
 
e079d59
 
725f549
 
 
 
f65dc03
9208e17
f65dc03
9208e17
 
f65dc03
 
 
725f549
f65dc03
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9208e17
f65dc03
9208e17
 
 
f65dc03
9208e17
e079d59
9208e17
 
 
e079d59
 
9208e17
f65dc03
9f26a6c
e079d59
f65dc03
91207a8
9208e17
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
import gradio as gr
from llama_cpp import Llama
import json

# Load the SmolLM2 GGUF model once at startup. If the weights cannot be
# fetched or loaded, fail fast with a clear error instead of crashing later
# on the first request.
try:
    llm = Llama.from_pretrained(
        repo_id="HuggingFaceTB/SmolLM2-360M-Instruct-GGUF",
        filename="smollm2-360m-instruct-q8_0.gguf",  # GGUF weights file within the repo
    )
except Exception as load_err:
    raise RuntimeError(f"Failed to load model: {load_err}")

# Function to match CV to job descriptions with debug information
def match_cv_to_jobs(cv_text, job_descriptions):
    """Compare a CV against each job description using the loaded LLM.

    Args:
        cv_text: Plain-text resume content.
        job_descriptions: Job descriptions, one per line.

    Returns:
        A ``(results, debug_info)`` tuple. ``results`` is a list of dicts:
        the model's response parsed as JSON when possible, otherwise a dict
        with the raw analysis text. ``debug_info`` is a text trace of every
        generated prompt and model response (and any error encountered).
    """
    debug_info = "Debug Info:\n"
    results = []

    try:
        # Split on newlines, skipping blank/whitespace-only lines so empty
        # input (or stray padding lines) doesn't trigger pointless model
        # calls with an empty "description".
        descriptions = [
            line for line in job_descriptions.strip().split("\n") if line.strip()
        ]

        for description in descriptions:
            # Create a prompt to compare the CV with each job description
            prompt = (
                f"Compare the following job description with this resume. Job Description: {description}. "
                f"Resume: {cv_text}. Provide a match score and a brief analysis."
            )
            debug_info += f"\nGenerated Prompt: {prompt}\n"

            # Generate response from the model
            response = llm.create_chat_completion(
                messages=[
                    {
                        "role": "user",
                        "content": prompt
                    }
                ]
            )

            # Extract the analysis text
            response_content = response["choices"][0]["message"]["content"]
            debug_info += f"Model Response: {response_content}\n"

            # Attempt to parse as JSON; if not JSON, use the raw text
            try:
                response_data = json.loads(response_content)
                results.append(response_data)
            except json.JSONDecodeError:
                results.append({
                    "Job Description": description,
                    "Analysis": response_content  # Use raw response if JSON parsing fails
                })
    except Exception as e:
        # Broad catch so the UI always gets a result + debug trace rather
        # than an unhandled exception; the error is surfaced in both outputs.
        debug_info += f"Error: {str(e)}\n"
        results.append({"Error": str(e)})

    return results, debug_info

# Gradio interface: two text inputs, one trigger button, and two output
# panes (structured match results plus a raw debug trace).
with gr.Blocks() as demo:
    gr.Markdown("# CV and Job Description Matcher with Debugging")

    # Inputs: the resume text and the newline-separated job descriptions.
    cv_box = gr.Textbox(
        label="CV Text",
        placeholder="Enter the CV text here",
        lines=10,
    )
    jobs_box = gr.Textbox(
        label="Job Descriptions (one per line)",
        placeholder="Enter each job description on a new line",
        lines=5,
    )

    # Trigger and outputs.
    run_btn = gr.Button("Match CV to Job Descriptions")
    results_view = gr.JSON(label="Match Results")
    debug_view = gr.Textbox(label="Debug Info", lines=10)

    # Wire the button to the matcher; it returns (results, debug_info).
    run_btn.click(
        fn=match_cv_to_jobs,
        inputs=[cv_box, jobs_box],
        outputs=[results_view, debug_view],
    )

demo.launch()