saifeddinemk committed on
Commit
9208e17
1 Parent(s): 725f549

Fixed app v2

Browse files
Files changed (1) hide show
  1. app.py +42 -48
app.py CHANGED
@@ -1,10 +1,6 @@
1
- from fastapi import FastAPI, HTTPException
2
- from pydantic import BaseModel
3
  from llama_cpp import Llama
4
 
5
- # Initialize FastAPI app
6
- app = FastAPI()
7
-
8
  # Load the SecurityLLM model
9
  try:
10
  llm = Llama.from_pretrained(
@@ -14,50 +10,48 @@ try:
14
  except Exception as e:
15
  raise RuntimeError(f"Failed to load model: {e}")
16
 
17
- # Define request model for CV matching
18
- class CVMatchRequest(BaseModel):
19
- cv_text: str
20
- job_descriptions: str
21
-
22
- # Define response model
23
- class CVMatchResponse(BaseModel):
24
- results: list
25
-
26
- # Define the route for CV and job description matching
27
- @app.post("/match_cv_to_jobs", response_model=CVMatchResponse)
28
- async def match_cv_to_jobs(request: CVMatchRequest):
29
- try:
30
- # Split job descriptions by line
31
- descriptions = request.job_descriptions.strip().split("\n")
32
- results = []
33
 
34
- for description in descriptions:
35
- # Create a prompt to compare the CV with each job description
36
- prompt = (
37
- f"Compare the following job description with this resume. Job Description: {description}. "
38
- f"Resume: {request.cv_text}. Provide a match score and a brief analysis."
39
- )
40
-
41
- # Generate response from the model
42
- response = llm.create_chat_completion(
43
- messages=[
44
- {
45
- "role": "user",
46
- "content": prompt
47
- }
48
- ]
49
- )
50
-
51
- # Extract and return the analysis text
52
- analysis_text = response["choices"][0]["message"]["content"]
53
- results.append({
54
- "Job Description": description,
55
- "Analysis": analysis_text
56
- })
57
 
58
- return CVMatchResponse(results=results)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
 
60
- except Exception as e:
61
- raise HTTPException(status_code=500, detail=str(e))
62
 
63
- # To run the app, use: uvicorn app:app --reload
 
1
+ import gradio as gr
 
2
  from llama_cpp import Llama
3
 
 
 
 
4
  # Load the SecurityLLM model
5
  try:
6
  llm = Llama.from_pretrained(
 
10
  except Exception as e:
11
  raise RuntimeError(f"Failed to load model: {e}")
12
 
13
# Function to match CV to job descriptions
def match_cv_to_jobs(cv_text, job_descriptions):
    """Compare a CV against one or more job descriptions using the loaded LLM.

    Args:
        cv_text: The full resume/CV text.
        job_descriptions: Job descriptions, one per line.

    Returns:
        A list of dicts, one per job description, each with the keys
        "Job Description" and "Analysis" (the model's match analysis text).
    """
    # Split job descriptions by line, discarding blank/whitespace-only lines
    # so stray empty lines in the textbox don't trigger empty-prompt model calls.
    descriptions = [d.strip() for d in job_descriptions.strip().split("\n") if d.strip()]
    results = []

    for description in descriptions:
        # Create a prompt to compare the CV with this job description.
        prompt = (
            f"Compare the following job description with this resume. Job Description: {description}. "
            f"Resume: {cv_text}. Provide a match score and a brief analysis."
        )

        # Generate a single-turn chat completion from the model.
        response = llm.create_chat_completion(
            messages=[
                {
                    "role": "user",
                    "content": prompt
                }
            ]
        )

        # Extract the analysis text from the first (only) choice.
        analysis_text = response["choices"][0]["message"]["content"]
        results.append({
            "Job Description": description,
            "Analysis": analysis_text
        })

    return results
44
+
45
# Gradio UI: one page that takes a CV plus newline-separated job
# descriptions and renders the model's per-description analyses as JSON.
with gr.Blocks() as demo:
    gr.Markdown("# CV to Job Description Matcher")

    cv_input = gr.Textbox(label="CV Text", placeholder="Enter the CV text here", lines=10)
    jobs_input = gr.Textbox(label="Job Descriptions (one per line)", placeholder="Enter each job description on a new line", lines=5)
    run_button = gr.Button("Match CV to Job Descriptions")

    results_view = gr.JSON(label="Match Results")

    # Wire the button: both textboxes feed match_cv_to_jobs, output goes to the JSON panel.
    run_button.click(fn=match_cv_to_jobs, inputs=[cv_input, jobs_input], outputs=results_view)

demo.launch()