saifeddinemk committed
Commit 725f549 · 1 Parent(s): 52ba51a

Fixed app v2

Files changed (2)
  1. app.py +42 -37
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,58 +1,63 @@
-import uvicorn
 from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
 from llama_cpp import Llama
-import json
 
 # Initialize FastAPI app
 app = FastAPI()
 
-# Define request model for job description comparison
-class CompareRequest(BaseModel):
+# Load the SecurityLLM model
+try:
+    llm = Llama.from_pretrained(
+        repo_id="QuantFactory/SecurityLLM-GGUF",
+        filename="SecurityLLM.Q5_K_M.gguf",  # Ensure the file path is correct
+    )
+except Exception as e:
+    raise RuntimeError(f"Failed to load model: {e}")
+
+# Define request model for CV matching
+class CVMatchRequest(BaseModel):
     cv_text: str
     job_descriptions: str
 
 # Define response model
-class AnalysisResponse(BaseModel):
+class CVMatchResponse(BaseModel):
     results: list
 
-# Endpoint to compare job descriptions with the CV text
-@app.post("/compare/", response_model=AnalysisResponse)
-async def compare_job_cv(request: CompareRequest):
+# Define the route for CV and job description matching
+@app.post("/match_cv_to_jobs", response_model=CVMatchResponse)
+async def match_cv_to_jobs(request: CVMatchRequest):
     try:
-        # Load the Llama model within the endpoint to control its lifecycle
-        with Llama.from_pretrained(
-            repo_id="HuggingFaceTB/SmolLM2-360M-Instruct-GGUF",
-            filename="smollm2-360m-instruct-q8_0.gguf"  # Replace with the correct path to your GGUF file
-        ) as llm:
-
-            # Split job descriptions by line
-            descriptions = request.job_descriptions.strip().split("\n")
-            results = []
-
-            for description in descriptions:
-                # Create chat messages for each job description
-                messages = [
-                    {"role": "user", "content": f"Compare the following job description with this resume. Job Description: {description}. Resume: {request.cv_text}. Give a match score and brief analysis."}
+        # Split job descriptions by line
+        descriptions = request.job_descriptions.strip().split("\n")
+        results = []
+
+        for description in descriptions:
+            # Create a prompt to compare the CV with each job description
+            prompt = (
+                f"Compare the following job description with this resume. Job Description: {description}. "
+                f"Resume: {request.cv_text}. Provide a match score and a brief analysis."
+            )
+
+            # Generate response from the model
+            response = llm.create_chat_completion(
+                messages=[
+                    {
+                        "role": "user",
+                        "content": prompt
+                    }
                 ]
-
-                # Generate response using Llama
-                response = llm.create_chat_completion(messages=messages)
-                response_content = response["choices"][0]["message"]["content"]
-
-                # Parse response content for a score and summary
-                try:
-                    response_data = json.loads(response_content)
-                    results.append(response_data)
-                except json.JSONDecodeError:
-                    results.append({
-                        "Job Description": description,
-                        "Analysis": response_content  # Use raw response if JSON parsing fails
-                    })
-
-        return AnalysisResponse(results=results)
+            )
+
+            # Extract and return the analysis text
+            analysis_text = response["choices"][0]["message"]["content"]
+            results.append({
+                "Job Description": description,
+                "Analysis": analysis_text
+            })
+
+        return CVMatchResponse(results=results)
 
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
 
-# Run the app with: uvicorn main:app --reload
+# To run the app, use: uvicorn app:app --reload
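
For reference, a minimal client sketch for exercising the new endpoint (not part of the commit). It assumes the app is running via `uvicorn app:app` on the uvicorn default `http://127.0.0.1:8000`, and the CV/job-description payload is invented sample data; it uses only the standard library, since no HTTP client is listed in requirements.txt.

```python
# client_example.py — hypothetical smoke test for POST /match_cv_to_jobs.
import json
import urllib.request

payload = {
    "cv_text": "Security analyst with five years of SIEM and incident-response experience.",
    "job_descriptions": "SOC Analyst: triage alerts and escalate incidents.\n"
                        "Network Engineer: design and maintain LAN/WAN infrastructure.",
}

req = urllib.request.Request(
    "http://127.0.0.1:8000/match_cv_to_jobs",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)

with urllib.request.urlopen(req) as resp:
    # The endpoint returns one result per job-description line,
    # each with "Job Description" and "Analysis" keys.
    for item in json.loads(resp.read())["results"]:
        print(item["Job Description"], "->", item["Analysis"])
```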
requirements.txt CHANGED
@@ -2,4 +2,5 @@ fastapi
 uvicorn
 transformers
 git+https://github.com/abetlen/llama-cpp-python.git
-nest_asyncio
+nest_asyncio
+pydantic
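
And a quick, optional sanity check (hypothetical, not part of the commit) that the packages the app imports resolve in the active environment after installing the updated requirements:

```python
# check_env.py — verifies the imports used by app.py are installed.
import fastapi
import llama_cpp
import pydantic
import uvicorn

print("fastapi", fastapi.__version__)
print("llama_cpp", llama_cpp.__version__)
print("pydantic", pydantic.VERSION)
print("uvicorn", uvicorn.__version__)
```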