saifeddinemk committed
Commit 96ed827 · 1 Parent(s): f65dc03
Fixed app v2
app.py CHANGED

@@ -1,5 +1,6 @@
 import gradio as gr
 from llama_cpp import Llama
+from transformers import pipeline
 import json

 # Load the Llama model
@@ -11,24 +12,38 @@ try:
 except Exception as e:
     raise RuntimeError(f"Failed to load model: {e}")

+# Load summarization model
+summarizer = pipeline("summarization")
+
+# Summarize text to fit within token limits
+def summarize_text(text, max_length=100):
+    # Use the summarizer to condense the text
+    summary = summarizer(text, max_length=max_length, min_length=25, do_sample=False)
+    return summary[0]["summary_text"]
+
 # Function to match CV to job descriptions with debug information
 def match_cv_to_jobs(cv_text, job_descriptions):
     debug_info = "Debug Info:\n"
     results = []

-
-
-
+    # Summarize `cv_text` and `job_descriptions` to manage token limits
+    summarized_cv = summarize_text(cv_text, max_length=400)
+    debug_info += f"Summarized CV Text: {summarized_cv}\n"
+
+    descriptions = job_descriptions.strip().split("\n")
+    for description in descriptions:
+        summarized_description = summarize_text(description, max_length=100)
+        debug_info += f"\nSummarized Job Description: {summarized_description}\n"

-
-
-
-
-
-
-
-
-
+        # Create a prompt to compare the summarized CV with each summarized job description
+        prompt = (
+            f"Compare the following job description with this resume. Job Description: {summarized_description}. "
+            f"Resume: {summarized_cv}. Provide a match score and a brief analysis."
+        )
+        debug_info += f"\nGenerated Prompt: {prompt}\n"
+
+        # Generate response from the model
+        try:
             response = llm.create_chat_completion(
                 messages=[
                     {
@@ -49,17 +64,17 @@ def match_cv_to_jobs(cv_text, job_descriptions):
         except json.JSONDecodeError:
             results.append({
                 "Job Description": description,
-                "Analysis": response_content
+                "Analysis": response_content
             })
-
-
-
+        except Exception as e:
+            debug_info += f"Error: {str(e)}\n"
+            results.append({"Job Description": description, "Error": str(e)})

     return results, debug_info

 # Gradio interface
 with gr.Blocks() as demo:
-    gr.Markdown("# CV and Job Description Matcher with Debugging")
+    gr.Markdown("# CV and Job Description Matcher with Summarization and Debugging")

     # Input fields for CV and job descriptions
     cv_text = gr.Textbox(label="CV Text", placeholder="Enter the CV text here", lines=10)
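
For context, this is roughly how the two new pieces introduced by this commit fit together. It is a minimal sketch, not part of the commit: it assumes the summarize_text and match_cv_to_jobs definitions above are in scope (for example in a notebook) and that the Llama model loaded successfully; the sample CV and job strings are invented for illustration.

# Illustrative only: sample_cv and sample_jobs are made-up inputs.
sample_cv = (
    "Senior Python developer with five years of experience building NLP "
    "services, REST APIs, and data pipelines."
)
sample_jobs = "Backend engineer, Python and FastAPI\nData analyst, SQL and reporting"

# Condense the CV first, as match_cv_to_jobs now does internally
print(summarize_text(sample_cv, max_length=100))

# One result dict per job-description line, plus the accumulated debug log
results, debug_info = match_cv_to_jobs(sample_cv, sample_jobs)
print(debug_info)
for entry in results:
    print(entry)

One design note: pipeline("summarization") with no arguments downloads a default checkpoint on first use; if reproducibility or Space cold-start time matters, the pipeline's model parameter can pin a specific summarization checkpoint instead.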