from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch

# Initialize the FastAPI app
app = FastAPI()

# Load the base model, the LoRA adapter, and the tokenizer once at startup
base_model_name = "akjindal53244/Llama-3.1-Storm-8B"
peft_model_id = "LlamaFactoryAI/cv-job-description-matching"
base_model = AutoModelForCausalLM.from_pretrained(base_model_name, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base_model, peft_model_id, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
# Define the request model
class AnalysisRequest(BaseModel):
    cv: str
    job_description: str

# Expose the analysis endpoint
@app.post("/analyze")
async def analyze(request: AnalysisRequest):
    try:
        # Build the prompt: system instructions followed by the tagged CV/JD pair
        system_prompt = """
        You are an advanced AI model designed to analyze the compatibility between a CV and a job description. You will receive a CV and a job description. Your task is to output a structured JSON format that includes the following:
        1. matching_analysis: Analyze the CV against the job description to identify key strengths and gaps.
        2. description: Summarize the relevance of the CV to the job description in a few concise sentences.
        3. score: Provide a numerical compatibility score (0-100) based on qualifications, skills, and experience.
        4. recommendation: Suggest actions for the candidate to improve their match or readiness for the role.
        Your output must be in JSON format as follows:
        {
            "matching_analysis": "Your detailed analysis here.",
            "description": "A brief summary here.",
            "score": 85,
            "recommendation": "Your suggestions here."
        }
        """
        user_input = f"<CV> {request.cv} </CV>\n<job_description> {request.job_description} </job_description>"
        input_text = system_prompt + user_input

        # Tokenize and move the tensors to the model's device before generating
        inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
        # 64 new tokens is far too short for the requested JSON; leave room for it
        outputs = model.generate(**inputs, max_new_tokens=512)
        # Decode only the newly generated tokens, skipping the echoed prompt
        generated_text = tokenizer.decode(
            outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
        )
        return {"analysis": generated_text}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
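
For reference, a minimal client call might look like the sketch below. It assumes the file above is saved as app.py and served locally with uvicorn app:app --port 8000; the script name, port, and example payload are assumptions of this sketch, not details taken from the Space.

# Hypothetical client sketch: assumes the service was started with
# "uvicorn app:app --port 8000" and exposes POST /analyze as defined above.
import requests

payload = {
    "cv": "Senior Python developer, 6 years of FastAPI and ML deployment experience.",
    "job_description": "Backend engineer to build and serve LLM-powered APIs.",
}
response = requests.post("http://localhost:8000/analyze", json=payload, timeout=300)
response.raise_for_status()
# The generated analysis is returned under the "analysis" key
print(response.json()["analysis"])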