saifeddinemk committed
Commit c7465ad
1 Parent(s): 3766c60

Fixed app v2

Files changed (1)
  1. app.py +10 -25
app.py CHANGED
@@ -1,26 +1,13 @@
 from fastapi import FastAPI, File, UploadFile, Form
-from llama_cpp import Llama
+from transformers import pipeline
 from typing import List
-from fastapi.middleware.cors import CORSMiddleware
 import json
 
 # Initialize FastAPI app
 app = FastAPI()
 
-# Add CORS middleware to allow frontend access
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-# Load the Llama model
-llm = Llama.from_pretrained(
-    repo_id="HuggingFaceTB/SmolLM2-1.7B-Instruct-GGUF",
-    filename="smollm2-1.7b-instruct-q4_k_m.gguf",
-)
+# Initialize the text generation pipeline with the Qwen model
+pipe = pipeline("text-generation", model="Qwen/Qwen2.5-1.5B-Instruct")
 
 # Endpoint to upload CV file and store CV text
 @app.post("/upload-cv/")
@@ -37,16 +24,14 @@ async def compare_job_cv(job_descriptions: str = Form(...), cv_text: str = Form(
     results = []
 
     for description in descriptions:
-        # Create chat messages to prompt Llama for each job description
-        messages = [
-            {"role": "user", "content": f"Compare the following job description with this resume. Job Description: {description}. Resume: {cv_text}. Give a match score and brief analysis."}
-        ]
+        # Create prompt for the Qwen model
+        prompt = f"Compare the following job description with this resume. Job Description: {description}. Resume: {cv_text}. Give a match score and brief analysis."
 
-        # Generate response using Llama
-        response = llm.create_chat_completion(messages=messages)
-        response_content = response["choices"][0]["message"]["content"]
+        # Generate response using the text generation pipeline
+        response = pipe(prompt, max_length=100, num_return_sequences=1)
+        response_content = response[0]["generated_text"]
 
-        # Parse response content for a score and summary
+        # Parse response content for score and summary if JSON formatted
         try:
             response_data = json.loads(response_content)
             results.append(response_data)
@@ -58,4 +43,4 @@ async def compare_job_cv(job_descriptions: str = Form(...), cv_text: str = Form(
 
     return {"results": results}
 
-# Run the app with `uvicorn <filename>:app --reload`
+
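
For context, a minimal standalone sketch of the generation call this commit switches to, runnable outside FastAPI. The model id is taken from the diff; the example prompt and the max_new_tokens / return_full_text parameters are illustrative assumptions, not the values committed in app.py (which uses max_length=100):

# Sketch only: exercises the same transformers text-generation pipeline as the new app.py.
from transformers import pipeline

pipe = pipeline("text-generation", model="Qwen/Qwen2.5-1.5B-Instruct")

prompt = "Compare the following job description with this resume. Job Description: ... Resume: ... Give a match score and brief analysis."
# max_new_tokens bounds only the generated tokens; return_full_text=False drops the echoed prompt.
response = pipe(prompt, max_new_tokens=200, num_return_sequences=1, return_full_text=False)
print(response[0]["generated_text"])  # only the model's continuation, given return_full_text=False

By default the text-generation pipeline returns the prompt followed by the continuation in generated_text, and max_length counts prompt tokens as well as generated ones, whereas max_new_tokens counts only the generated tokens.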