Commit · 725f549
Parent(s): 52ba51a

Fixed app v2

Files changed:
- app.py: +42 -37
- requirements.txt: +2 -1
app.py
CHANGED
@@ -1,58 +1,63 @@
-import uvicorn
 from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
 from llama_cpp import Llama
-import json
 
 # Initialize FastAPI app
 app = FastAPI()
 
-#
+# Load the SecurityLLM model
+try:
+    llm = Llama.from_pretrained(
+        repo_id="QuantFactory/SecurityLLM-GGUF",
+        filename="SecurityLLM.Q5_K_M.gguf",  # Ensure the file path is correct
+    )
+except Exception as e:
+    raise RuntimeError(f"Failed to load model: {e}")
+
+# Define request model for CV matching
+class CVMatchRequest(BaseModel):
     cv_text: str
     job_descriptions: str
 
 # Define response model
-class
+class CVMatchResponse(BaseModel):
     results: list
 
-#
-@app.post("/
-async def
+# Define the route for CV and job description matching
+@app.post("/match_cv_to_jobs", response_model=CVMatchResponse)
+async def match_cv_to_jobs(request: CVMatchRequest):
     try:
-        #
+        # Split job descriptions by line
+        descriptions = request.job_descriptions.strip().split("\n")
+        results = []
+
+        for description in descriptions:
+            # Create a prompt to compare the CV with each job description
+            prompt = (
+                f"Compare the following job description with this resume. Job Description: {description}. "
+                f"Resume: {request.cv_text}. Provide a match score and a brief analysis."
+            )
 
-        messages
-            {
+            # Generate response from the model
+            response = llm.create_chat_completion(
+                messages=[
+                    {
+                        "role": "user",
+                        "content": prompt
+                    }
                 ]
-            results.append(response_data)
-        except json.JSONDecodeError:
-            results.append({
-                "Job Description": description,
-                "Analysis": response_content  # Use raw response if JSON parsing fails
-            })
+            )
+
+            # Extract and return the analysis text
+            analysis_text = response["choices"][0]["message"]["content"]
+            results.append({
+                "Job Description": description,
+                "Analysis": analysis_text
+            })
 
-        return
+        return CVMatchResponse(results=results)
 
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
 
-#
+# To run the app, use: uvicorn app:app --reload
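For reference, a minimal client sketch for the new endpoint (not part of the commit): it assumes the app is served locally with uvicorn on the default port 8000 (the URL is hypothetical) and that the requests package is installed, which is not listed in requirements.txt. The request fields mirror CVMatchRequest and the response shape mirrors CVMatchResponse.

# client.py -- hypothetical usage sketch; URL and sample texts are assumptions.
import requests

payload = {
    "cv_text": "Security engineer, 5 years of SIEM and incident response.",
    "job_descriptions": "SOC analyst at a bank\nPenetration tester, remote",
}

# The endpoint returns one analysis per newline-separated job description.
resp = requests.post(
    "http://localhost:8000/match_cv_to_jobs",
    json=payload,
    timeout=600,  # local GGUF inference can be slow
)
resp.raise_for_status()

for result in resp.json()["results"]:
    print(result["Job Description"])
    print(result["Analysis"])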
requirements.txt
CHANGED
@@ -2,4 +2,5 @@ fastapi
 uvicorn
 transformers
 git+https://github.com/abetlen/llama-cpp-python.git
-nest_asyncio
+nest_asyncio
+pydantic
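To exercise the endpoint end to end, a hedged smoke-test sketch using FastAPI's TestClient (not part of the commit): it assumes app.py is importable from the working directory, that httpx is installed (TestClient depends on it), and that the GGUF model can be fetched when app.py is imported.

# smoke_test.py -- hypothetical test sketch; sample texts are assumptions.
from fastapi.testclient import TestClient

from app import app  # importing app.py triggers the model download/load

client = TestClient(app)

def test_match_cv_to_jobs():
    payload = {
        "cv_text": "Backend developer with FastAPI experience.",
        "job_descriptions": "API engineer\nPlatform engineer",
    }
    response = client.post("/match_cv_to_jobs", json=payload)
    assert response.status_code == 200
    # Expect one result per newline-separated job description
    assert len(response.json()["results"]) == 2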