Fralet committed on
Commit
324d859
1 Parent(s): 1e7fadb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -16
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import streamlit as st
2
- from transformers import pipeline
3
  import pandas as pd
 
4
  import re
5
  import nltk
6
  from nltk.corpus import stopwords
@@ -13,14 +13,14 @@ nltk.download('wordnet')
13
  # Initialize the zero-shot classification pipeline
14
  classifier = pipeline("zero-shot-classification", model="Fralet/personality")
15
 
16
- # Define the candidate labels according to the Enneagram types
17
  default_labels = ["Peacemaker", "Loyalist", "Achiever", "Reformer", "Individualist", "Helper", "Challenger", "Investigator", "Enthusiast"]
18
 
19
  # Streamlit interface setup
20
  st.title("Resume-based Personality Prediction by Serikov Ayanbek")
21
 
22
  # Load data from Excel
23
- data = pd.read_excel("ResponseTest.xlsx") # Replace 'your_excel_file.xlsx' with your actual file name
24
 
25
  # Preprocess text function
26
  def preprocess_text(text):
@@ -28,30 +28,32 @@ def preprocess_text(text):
28
  text = text.lower()
29
  text = re.sub(r'\s+[a-z]\s+', ' ', text)
30
  text = re.sub(r'^[a-z]\s+', ' ', text)
31
- text = re.sub(r'\s+', ' ', text)
32
  stop_words = set(stopwords.words('english'))
33
  lemmatizer = WordNetLemmatizer()
34
  tokens = text.split()
35
  tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
36
  return ' '.join(tokens)
37
 
38
- # User-defined labels option
 
 
 
 
39
  user_labels = st.text_input("Enter custom labels separated by comma (optional)")
40
  labels = user_labels.split(',') if user_labels else default_labels
41
-
42
- # Prediction confidence threshold
43
  confidence_threshold = st.slider("Confidence Threshold", 0.0, 1.0, 0.5)
44
 
45
  if st.button("Predict Personality"):
46
- # Combine relevant text columns
47
- question_columns = [f'Q{i}' for i in range(1, 37)] # Adjust range if there are more or fewer question columns
48
- data['combined_text'] = data[['CV/Resume'] + question_columns].agg(' '.join, axis=1)
49
- data['processed_text'] = data['combined_text'].apply(preprocess_text)
50
 
51
- # Make predictions
52
- predictions = data['processed_text'].apply(lambda x: classifier(x, labels))
 
53
 
54
- # Extract and display predictions
55
- data['predicted_labels'] = predictions.apply(lambda x: [label for label, score in zip(x['labels'], x['scores']) if score >= confidence_threshold])
56
- st.dataframe(data[['True_label', 'Predicted', 'predicted_labels']])
57
 
 
 
 
1
  import streamlit as st
 
2
  import pandas as pd
3
+ from transformers import pipeline
4
  import re
5
  import nltk
6
  from nltk.corpus import stopwords
 
13
  # Initialize the zero-shot classification pipeline
14
  classifier = pipeline("zero-shot-classification", model="Fralet/personality")
15
 
16
+ # Define the default candidate labels (modifiable if different labels are needed)
17
  default_labels = ["Peacemaker", "Loyalist", "Achiever", "Reformer", "Individualist", "Helper", "Challenger", "Investigator", "Enthusiast"]
18
 
19
  # Streamlit interface setup
20
  st.title("Resume-based Personality Prediction by Serikov Ayanbek")
21
 
22
  # Load data from Excel
23
+ data = pd.read_excel("your_excel_file.xlsx") # Adjust file path/name as necessary
24
 
25
  # Preprocess text function
26
def preprocess_text(text):
    """Normalize free text for zero-shot classification.

    Lowercases, drops stray single-letter tokens, collapses runs of
    whitespace, removes English stopwords, and lemmatizes the remaining
    tokens.

    Parameters
    ----------
    text : str
        Raw text (e.g. a resume or questionnaire answer).

    Returns
    -------
    str
        Space-joined, lemmatized, stopword-free tokens.
    """
    text = text.lower()
    # Remove single alphabetic characters surrounded by spaces, or at the start.
    text = re.sub(r'\s+[a-z]\s+', ' ', text)
    text = re.sub(r'^[a-z]\s+', ' ', text)
    # BUG FIX: original read `text is re.sub(...)` — the identity comparison
    # discarded the substitution result, so whitespace was never collapsed.
    text = re.sub(r'\s+', ' ', text)
    stop_words = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()
    tokens = text.split()
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
    return ' '.join(tokens)
37
 
38
+ # Combine relevant text columns
39
+ data['combined_text'] = data[['CV/Resume'] + [f'Q{i}' for i in range(1, 37)]].agg(' '.join, axis=1)
40
+ data['processed_text'] = data['combined_text'].apply(preprocess_text)
41
+
42
+ # Streamlit user inputs
43
  user_labels = st.text_input("Enter custom labels separated by comma (optional)")
44
  labels = user_labels.split(',') if user_labels else default_labels
 
 
45
  confidence_threshold = st.slider("Confidence Threshold", 0.0, 1.0, 0.5)
46
 
47
  if st.button("Predict Personality"):
48
+ # Predict personality from processed text
49
+ data['predictions'] = data['processed_text'].apply(lambda x: classifier(x, labels))
 
 
50
 
51
+ # Extract predictions above confidence threshold and display alongside MAX labels
52
+ data['predicted_labels'] = data['predictions'].apply(lambda x: {label: f"{score*100:.2f}%" for label, score in zip(x['labels'], x['scores']) if score >= confidence_threshold})
53
+ data['MAX_labels'] = data.apply(lambda x: [x['MAX1'], x['MAX2'], x['MAX3']], axis=1)
54
 
55
+ st.write("Predictions and Labels:")
56
+ st.dataframe(data[['True_label', 'Predicted', 'predicted_labels', 'MAX_labels']])
 
57
 
58
+ # Run this last part to show the DataFrame outside the button press if needed
59
+ st.dataframe(data)