# mh_test/mh_classification.py
import os

from dotenv import load_dotenv
from langchain.agents import AgentType, initialize_agent
from langchain.tools import Tool
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

from prompts.mh_clarification import clarification_prompt

# Load environment variables
load_dotenv()

# Disable DSPy result caching by default (DSP_CACHEBOOL)
os.environ.setdefault("DSP_CACHEBOOL", "false")

# Get API key
api_key = os.getenv("OPENAI_API_KEY")

if not api_key:
    raise ValueError("OPENAI_API_KEY environment variable is not set")

# Initialize the chat model
model = ChatOpenAI(
    model=os.getenv("MODEL", "gpt-3.5-turbo"),
    openai_api_key=api_key,
    temperature=0.0,
    max_tokens=4000,
)

# Define a tool to process job descriptions
class JobDescriptionProcessor(Tool):
    def __init__(self, name: str, description: str, func):
        super().__init__(name=name, func=func, description=description)

    def _run(self, tool_input: str):
        try:
            # Extract key aspects from the job description
            aspects = self.func(tool_input)
            return aspects
        except Exception as e:
            return f"Error processing job description: {str(e)}"

# Define a function to extract key aspects from job description
def extract_key_aspects(jd: str) -> str:
    try:
        # Create a prompt for extracting key aspects
        prompt = f"""Extract the key aspects from this job description. Focus on:
1. Position title
2. Required qualifications
3. Experience requirements
4. Key skills
5. Must-have requirements
Job Description:
{jd}
Format the output as a clear list of key aspects."""
        # Get response from the model
        response = model.invoke([HumanMessage(content=prompt)])
        return response.content
    except Exception as e:
        return f"Error extracting key aspects: {str(e)}"

# Define a tool to process checkpoints and resume
class ClarificationProcessor(Tool):
    def __init__(self, name: str, description: str, func):
        super().__init__(name=name, func=func, description=description)

    def _run(self, tool_input: str):
        # Extract checkpoints and resume from the input string
        try:
            # Split by "Resume:" and ensure we have both parts
            parts = tool_input.split("Resume:")
            if len(parts) != 2:
                return "Error: Input must contain both checkpoints and resume sections"
            # Extract checkpoints and resume, removing any extra whitespace
            checkpoints = parts[0].replace("Checkpoints:", "").strip()
            resume = parts[1].strip()
            if not checkpoints or not resume:
                return "Error: Both checkpoints and resume sections must not be empty"
            return self.func(checkpoints, resume)
        except Exception as e:
            return f"Error processing input: {str(e)}"

# Define a function to generate clarifications based on checkpoints and resume
def generate_clarifications(checkpoints: str, resume: str) -> str:
    try:
        # Create the prompt
        prompt_text = clarification_prompt.format(checkpoints=checkpoints, resume=resume)
        # Get response from the model
        response = model.invoke([HumanMessage(content=prompt_text)])
        return response.content
    except Exception as e:
        return f"Error generating clarifications: {str(e)}"

# Define the agent tools
tools = [
    JobDescriptionProcessor(
        name="JobDescriptionProcessor",
        description="Extract key aspects from a job description.",
        func=extract_key_aspects,
    ),
    ClarificationProcessor(
        name="ClarificationProcessor",
        description="Generate clarifications based on the provided checkpoints and resume.",
        func=generate_clarifications,
    ),
]

# Initialize the LangChain agent
agent = initialize_agent(
    tools,
    model,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)