from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from llama_cpp import Llama

# Initialize FastAPI app
app = FastAPI()

# Load the SecurityLLM model
try:
    llm = Llama.from_pretrained(
        repo_id="QuantFactory/SecurityLLM-GGUF",
        filename="SecurityLLM.Q5_K_M.gguf",  # Ensure the file path is correct
    )
except Exception as e:
    raise RuntimeError(f"Failed to load model: {e}")
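
# Note: Llama.from_pretrained downloads the GGUF file from the Hugging Face Hub
# on first run, so the huggingface_hub package must be installed and network
# access must be available.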

# Define request model for CV matching
class CVMatchRequest(BaseModel):
    cv_text: str
    job_descriptions: str

# Define response model
class CVMatchResponse(BaseModel):
    results: list

# Define the route for CV and job description matching
# (the original snippet omitted the decorator; the "/match" path is an assumption)
@app.post("/match", response_model=CVMatchResponse)
async def match_cv_to_jobs(request: CVMatchRequest):
    try:
        # Split job descriptions by line
        descriptions = request.job_descriptions.strip().split("\n")
        results = []
        for description in descriptions:
            # Create a prompt to compare the CV with each job description
            prompt = (
                f"Compare the following job description with this resume. Job Description: {description}. "
                f"Resume: {request.cv_text}. Provide a match score and a brief analysis."
            )
            # Generate a response from the model
            response = llm.create_chat_completion(
                messages=[
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ]
            )
            # Extract the analysis text and collect it for the response
            analysis_text = response["choices"][0]["message"]["content"]
            results.append({
                "Job Description": description,
                "Analysis": analysis_text,
            })
        return CVMatchResponse(results=results)
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# To run the app, use: uvicorn app:app --reload
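
# Example client call (a minimal sketch; assumes the server runs locally on
# port 8000 and uses the "/match" endpoint path chosen above):
#
#   import requests
#   payload = {
#       "cv_text": "Python developer with five years of backend experience.",
#       "job_descriptions": "Backend engineer (Python/FastAPI)\nData analyst (SQL)",
#   }
#   resp = requests.post("http://localhost:8000/match", json=payload)
#   print(resp.json()["results"])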