saifeddinemk committed on
Commit
732403f
1 Parent(s): 7a1bba5

Fixed app v2

Browse files
Files changed (1) hide show
  1. app.py +35 -36
app.py CHANGED
@@ -1,22 +1,22 @@
1
- from transformers import AutoTokenizer
2
  from optimum.intel.openvino import OVModelForCausalLM
3
  import gradio as gr
4
- from transformers import pipeline
5
  import json
6
 
7
- # Load OpenVINO GPT-J model
8
  model_id = "OpenVINO/gpt-j-6b-int4-ov"
9
  tokenizer = AutoTokenizer.from_pretrained(model_id)
10
  model = OVModelForCausalLM.from_pretrained(model_id)
11
 
12
- # Summarization pipeline
13
- summarizer = pipeline("summarization")
 
14
 
15
  def summarize_text(text, max_length=100):
16
  summary = summarizer(text, max_length=max_length, min_length=25, do_sample=False)
17
  return summary[0]["summary_text"]
18
 
19
- def match_cv_to_jobs(cv_text, job_descriptions):
20
  debug_info = "Debug Info:\n"
21
  results = []
22
 
@@ -24,36 +24,35 @@ def match_cv_to_jobs(cv_text, job_descriptions):
24
  summarized_cv = summarize_text(cv_text, max_length=400)
25
  debug_info += f"Summarized CV Text: {summarized_cv}\n"
26
 
27
- descriptions = job_descriptions.strip().split("\n")
28
- for description in descriptions:
29
- summarized_description = summarize_text(description, max_length=100)
30
- debug_info += f"\nSummarized Job Description: {summarized_description}\n"
31
-
32
- # Create a prompt to compare the summarized CV with each job description
33
- prompt = (
34
- f"Compare the following job description with this resume. Job Description: {summarized_description}. "
35
- f"Resume: {summarized_cv}. Provide a match score and a brief analysis."
36
- )
37
- debug_info += f"\nGenerated Prompt: {prompt}\n"
 
 
 
 
 
 
38
 
39
- # Generate response from the model
40
- inputs = tokenizer(prompt, return_tensors="pt")
41
  try:
42
- outputs = model.generate(**inputs, max_length=200)
43
- response_content = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
44
- debug_info += f"Model Response: {response_content}\n"
45
-
46
- try:
47
- response_data = json.loads(response_content)
48
- results.append(response_data)
49
- except json.JSONDecodeError:
50
- results.append({
51
- "Job Description": description,
52
- "Analysis": response_content
53
- })
54
- except Exception as e:
55
- debug_info += f"Error: {str(e)}\n"
56
- results.append({"Job Description": description, "Error": str(e)})
57
 
58
  return results, debug_info
59
 
@@ -63,7 +62,7 @@ with gr.Blocks() as demo:
63
 
64
  # Input fields for CV and job descriptions
65
  cv_text = gr.Textbox(label="CV Text", placeholder="Enter the CV text here", lines=10)
66
- job_descriptions = gr.Textbox(label="Job Descriptions (one per line)", placeholder="Enter each job description on a new line", lines=5)
67
 
68
  # Button and output area
69
  match_button = gr.Button("Match CV to Job Descriptions")
@@ -71,6 +70,6 @@ with gr.Blocks() as demo:
71
  debug_output = gr.Textbox(label="Debug Info", lines=10) # Add a debug box to display debug info
72
 
73
  # Set button click to run the function
74
- match_button.click(fn=match_cv_to_jobs, inputs=[cv_text, job_descriptions], outputs=[output, debug_output])
75
 
76
  demo.launch()
 
1
+ from transformers import AutoTokenizer, pipeline
2
  from optimum.intel.openvino import OVModelForCausalLM
3
  import gradio as gr
 
4
  import json
5
 
6
+ # Load OpenVINO GPT-J model for causal language modeling
7
  model_id = "OpenVINO/gpt-j-6b-int4-ov"
8
  tokenizer = AutoTokenizer.from_pretrained(model_id)
9
  model = OVModelForCausalLM.from_pretrained(model_id)
10
 
11
+ # Load a quantized summarization model
12
+ summarizer_model_id = "OpenVINO/distilbart-cnn-12-6-int8-ov" # Example of a quantized summarization model
13
+ summarizer = pipeline("summarization", model=summarizer_model_id)
14
 
15
  def summarize_text(text, max_length=100):
16
  summary = summarizer(text, max_length=max_length, min_length=25, do_sample=False)
17
  return summary[0]["summary_text"]
18
 
19
+ def match_cv_to_jobs(cv_text, job_descriptions_text):
20
  debug_info = "Debug Info:\n"
21
  results = []
22
 
 
24
  summarized_cv = summarize_text(cv_text, max_length=400)
25
  debug_info += f"Summarized CV Text: {summarized_cv}\n"
26
 
27
+ # Summarize all job descriptions at once
28
+ summarized_descriptions = summarize_text(job_descriptions_text, max_length=400)
29
+ debug_info += f"Summarized Job Descriptions: {summarized_descriptions}\n"
30
+
31
+ # Create a prompt to compare the summarized CV with the summarized job descriptions
32
+ prompt = (
33
+ f"Compare the following job descriptions with this resume. Job Descriptions: {summarized_descriptions}. "
34
+ f"Resume: {summarized_cv}. Provide a match score ONLY out of 100 "
35
+ )
36
+ debug_info += f"\nGenerated Prompt: {prompt}\n"
37
+
38
+ # Generate response from the model
39
+ inputs = tokenizer(prompt, return_tensors="pt")
40
+ try:
41
+ outputs = model.generate(**inputs, max_length=200)
42
+ response_content = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
43
+ debug_info += f"Model Response: {response_content}\n"
44
 
 
 
45
  try:
46
+ response_data = json.loads(response_content)
47
+ results.append(response_data)
48
+ except json.JSONDecodeError:
49
+ results.append({
50
+ "Job Descriptions": job_descriptions_text,
51
+ "Analysis": response_content
52
+ })
53
+ except Exception as e:
54
+ debug_info += f"Error: {str(e)}\n"
55
+ results.append({"Job Descriptions": job_descriptions_text, "Error": str(e)})
 
 
 
 
 
56
 
57
  return results, debug_info
58
 
 
62
 
63
  # Input fields for CV and job descriptions
64
  cv_text = gr.Textbox(label="CV Text", placeholder="Enter the CV text here", lines=10)
65
+ job_descriptions_text = gr.Textbox(label="Job Descriptions", placeholder="Enter the job descriptions text here", lines=10)
66
 
67
  # Button and output area
68
  match_button = gr.Button("Match CV to Job Descriptions")
 
70
  debug_output = gr.Textbox(label="Debug Info", lines=10) # Add a debug box to display debug info
71
 
72
  # Set button click to run the function
73
+ match_button.click(fn=match_cv_to_jobs, inputs=[cv_text, job_descriptions_text], outputs=[output, debug_output])
74
 
75
  demo.launch()