import os

from dotenv import load_dotenv
from langchain.agents import AgentType, initialize_agent
from langchain.prompts import PromptTemplate
from langchain.tools import Tool
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

# Load environment variables from a .env file
load_dotenv()

# Disable DSP caching via the DSP_CACHEBOOL environment variable
os.environ.setdefault("DSP_CACHEBOOL", "false")

# Get the API key and fail fast if it is missing
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set")

# Initialize the chat model (model name is configurable via the MODEL env var)
model = ChatOpenAI(
    model=os.getenv("MODEL", "gpt-3.5-turbo"),
    openai_api_key=api_key,
    temperature=0.0,
    max_tokens=4000,
)
# Define the clarification prompt
clarification_prompt = PromptTemplate(
    input_variables=["checkpoints", "resume"],
    template="""
You are an expert recruiter specializing in evaluating resumes against job descriptions. Your task is to read the checkpoints provided and extract objective, factual information (if available) from the resume to clarify each checkpoint.

**Guidelines:**
1. Analyze both explicit and implicit meanings in the resume.
2. For must-have certifications, consider only those explicitly mentioned. Do not assume.
3. For industry relevance, assess the organizations listed and determine their industries.
4. For education and certifications, verify whether they match the stated requirements.
5. Provide objective reasoning with factual pointers from the resume.
6. Do not hallucinate or include information not grounded in the resume.
7. If the resume lacks enough information, state this explicitly.

**Checkpoints:**
{checkpoints}

**Resume:**
{resume}

**Output Format:**
Checkpoint 1: [Factual reasoning from resume based on checkpoint]
Checkpoint 2: [Factual reasoning from resume based on checkpoint]
""",
)
# Define a tool to process checkpoints and resume
class ClarificationProcessor(Tool):
    def __init__(self, name: str, description: str, func):
        super().__init__(name=name, func=func, description=description)

    def _run(self, tool_input: str):
        # Extract checkpoints and resume from the input string
        try:
            # Split on "Resume:" and ensure we have both parts
            parts = tool_input.split("Resume:")
            if len(parts) != 2:
                return "Error: Input must contain both checkpoints and resume sections"

            # Extract checkpoints and resume, removing any extra whitespace
            checkpoints = parts[0].replace("Checkpoints:", "").strip()
            resume = parts[1].strip()

            if not checkpoints or not resume:
                return "Error: Both checkpoints and resume sections must not be empty"

            return self.func(checkpoints, resume)
        except Exception as e:
            return f"Error processing input: {str(e)}"
# Define a function to generate clarifications based on checkpoints and resume
def generate_clarifications(checkpoints: str, resume: str) -> str:
    # Fill in the prompt template
    prompt_text = clarification_prompt.format(checkpoints=checkpoints, resume=resume)
    # Get the response from the model
    response = model.invoke([HumanMessage(content=prompt_text)])
    return response.content
# Define the agent tools
tools = [
    ClarificationProcessor(
        name="ClarificationProcessor",
        description="Generate clarifications based on the provided checkpoints and resume.",
        func=generate_clarifications,
    ),
]
# Initialize the LangChain agent (initialize_agent takes the keyword `agent`, not `agent_type`)
agent = initialize_agent(
    tools, model, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
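
# A minimal usage sketch, not part of the original script: the checkpoints and
# resume below are placeholder values. agent.run() is the legacy AgentExecutor
# entry point; on newer LangChain versions, agent.invoke({"input": ...}) also works.
if __name__ == "__main__":
    sample_input = (
        "Use the ClarificationProcessor tool on the following input.\n"
        "Checkpoints: 1. Must hold a PMP certification. 2. Must have at least 5 years of healthcare experience.\n"
        "Resume: Jane Doe. Senior project manager at Acme Health (2018-2024). PMP certified in 2019."
    )
    result = agent.run(sample_input)
    print(result)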