Hugging Face Space (status: Running) — commit "Update app.py": diff view of changes to app.py (file marked CHANGED; "Browse files" link in original UI).
@@ -13,47 +13,41 @@ nltk.download('wordnet')
|
|
13 |
# Initialize the zero-shot classification pipeline
classifier = pipeline("zero-shot-classification", model="Fralet/personality")

# Define the default candidate labels (modifiable if different labels are needed)
default_labels = ["Peacemaker", "Loyalist", "Achiever", "Reformer", "Individualist", "Helper", "Challenger", "Investigator", "Enthusiast"]

# Streamlit interface setup
st.title("Resume-based Personality Prediction by Serikov Ayanbek")

# Load data from Excel
# NOTE(review): this line was truncated to `pd.read_excel("` in extraction;
# restored with the placeholder filename used by the updated revision of
# this script — confirm the real workbook name before deploying.
data = pd.read_excel("your_excel_file.xlsx")
|
24 |
|
25 |
# Preprocess text function
|
26 |
def preprocess_text(text):
    """Normalize a text cell for classification.

    Replaces non-word characters with spaces, lowercases, drops stray
    single-letter tokens, collapses whitespace, removes English stopwords,
    and lemmatizes the remaining tokens.
    """
    # str() guards against NaN / numeric cells coming from the spreadsheet.
    text = re.sub(r'\W', ' ', str(text))
    text = text.lower()
    # NOTE(review): the next and the whitespace-collapse statement below were
    # truncated to bare no-op `text` lines in extraction; restored to match
    # the updated revision of this function.
    text = re.sub(r'\s+[a-z]\s+', ' ', text)  # drop single-letter tokens mid-string
    text = re.sub(r'^[a-z]\s+', ' ', text)    # drop a leading single-letter token
    text = re.sub(r'\s+', ' ', text)          # collapse runs of whitespace
    stop_words = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()
    tokens = text.split()
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
    return ' '.join(tokens)
|
37 |
|
38 |
-
# Combine relevant text columns
# NOTE(review): the statement building data['combined_text'] was lost in
# extraction (it is consumed on the next line); rebuilt from the question
# columns used by the updated revision — verify against the workbook schema.
question_columns = [f'Q{i}' for i in range(1, 37)]
data['combined_text'] = data[['CV/Resume'] + question_columns].astype(str).agg(' '.join, axis=1)
data['processed_text'] = data['combined_text'].apply(preprocess_text)

# Optional user-supplied labels; fall back to the defaults when the box is empty
user_labels = st.text_input("Enter custom labels separated by comma (optional)")
labels = user_labels.split(',') if user_labels else default_labels
confidence_threshold = st.slider("Confidence Threshold", 0.0, 1.0, 0.5)

if st.button("Predict Personality"):
    # NOTE(review): the original button body was truncated away in extraction;
    # reconstructed to classify each processed text against `labels`, keeping
    # labels whose score clears the slider threshold — confirm against history.
    def _predict(text):
        result = classifier(text, labels)
        return [lab for lab, score in zip(result['labels'], result['scores']) if score >= confidence_threshold]

    data['predicted_labels'] = data['processed_text'].apply(_predict)

# Run this last part to show the DataFrame outside the button press if needed
st.dataframe(data)
|
|
|
13 |
# Zero-shot classifier backed by the Fralet/personality model
classifier = pipeline("zero-shot-classification", model="Fralet/personality")

# Page header
st.title("Resume-based Personality Prediction by Serikov Ayanbek")

# Source spreadsheet; swap the placeholder for the real workbook name
data = pd.read_excel("your_excel_file.xlsx")
|
21 |
|
22 |
# Preprocess text function
|
23 |
def preprocess_text(text):
    """Normalize a text cell for classification.

    Replaces non-word characters with spaces, lowercases, drops stray
    single-letter tokens, collapses whitespace, removes English stopwords,
    and lemmatizes the remaining tokens.
    """
    # str() guards against NaN / numeric cells coming from the spreadsheet.
    text = re.sub(r'\W', ' ', str(text))
    text = text.lower()
    # BUGFIX: original read `text is re.sub(...)` — an identity comparison
    # whose result was discarded — so single-letter tokens were never removed.
    # Changed `is` to `=`.
    text = re.sub(r'\s+[a-z]\s+', ' ', text)  # drop single-letter tokens mid-string
    text = re.sub(r'^[a-z]\s+', ' ', text)    # drop a leading single-letter token
    text = re.sub(r'\s+', ' ', text)          # collapse runs of whitespace
    stop_words = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()
    tokens = text.split()
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
    return ' '.join(tokens)
|
34 |
|
35 |
+
# Combine relevant text columns for processing
question_columns = [f'Q{i}' for i in range(1, 37)]  # Adjust range if needed
# astype(str) prevents ' '.join from raising TypeError when a question
# column holds numeric or NaN cells.
data['combined_text'] = data[['CV/Resume'] + question_columns].astype(str).agg(' '.join, axis=1)
data['processed_text'] = data['combined_text'].apply(preprocess_text)
|
39 |
|
40 |
+
# Minimum score a candidate label must reach to be kept
confidence_threshold = st.slider("Confidence Threshold", 0.0, 1.0, 0.5)

if st.button("Predict Personality"):

    def get_predictions(row):
        # Each row supplies its own candidate labels via MAX1..MAX3.
        per_row_labels = [row['MAX1'], row['MAX2'], row['MAX3']]
        scored = classifier(row['processed_text'], per_row_labels)
        kept = []
        for lab, conf in zip(scored['labels'], scored['scores']):
            if conf >= confidence_threshold:
                kept.append(lab)
        return kept

    # Row-wise prediction, then show the comparison columns.
    # NOTE(review): assumes the workbook already contains True_label and
    # Predicted columns — confirm against the source data.
    data['predicted_labels'] = data.apply(get_predictions, axis=1)
    st.dataframe(data[['True_label', 'Predicted', 'predicted_labels']])
|
|
|
|