LovnishVerma committed on
Commit d2392e9 · verified · 1 Parent(s): 4713744

Update app.py

Files changed (1)
  1. app.py +84 -95
app.py CHANGED
@@ -1,25 +1,36 @@
-from PIL import Image
 import numpy as np
 import cv2
 import requests
 import face_recognition
 import os
-import streamlit as st

 # Set page title and description
 st.set_page_config(
-    page_title="Aadhaar Based Face Recognition Attendance System",
     page_icon="📷",
     layout="centered",
     initial_sidebar_state="collapsed"
 )
-st.title("Attendance System Using Face Recognition 📷")
-st.markdown("This app recognizes faces in an image, verifies Aadhaar card details, and updates attendance records with the current timestamp.")

-# Load images for face recognition
-Images = [] # List to store Images
-classnames = [] # List to store classnames
-aadhar_numbers = [] # List to store Aadhaar numbers

 directory = "photos"
 myList = os.listdir(directory)
@@ -30,103 +41,81 @@ for cls in myList:
     curImg = cv2.imread(img_path)
     Images.append(curImg)
     classnames.append(os.path.splitext(cls)[0])
-    # Assume Aadhaar number is part of the image filename (e.g., "123456_john.jpg")
-    aadhar_numbers.append(cls.split('_')[0])
-
-# Function to validate Aadhaar card number
-def validate_aadhaar(aadhaar):
-    # Implement your Aadhaar card validation logic here
-    # For simplicity, let's assume any 6-digit number is a valid Aadhaar card
-    return len(aadhaar) == 6 and aadhaar.isdigit()

-# Function to update Aadhaar data
-def update_data(name, aadhaar_number):
     url = "https://huggingface.glitch.me"
     url1 = "/update"
-    data = {'name': name, 'aadhaar': aadhaar_number}
-    response = requests.post(url + url1, data=data)

-    if response.status_code == 200:
-        st.success("Data updated on: " + url)
-    else:
-        st.warning("Data not updated")

 # Function to display image with overlay
-def display_image_with_overlay(image, name):
-    # Add overlay to the image (e.g., bounding box and name)
-    # ...
-
-    # Apply styling with CSS
-    st.markdown('<style>img { animation: pulse 2s infinite; }</style>', unsafe_allow_html=True)
     st.image(image, use_column_width=True, output_format="PNG")

-# Take input Aadhaar card details
-aadhaar_number = st.text_input("Enter your Last 6-digits Aadhaar Number:")
-
-# Take picture using the camera
-img_file_buffer = st.camera_input("Take a picture")
-
 # Load images for face recognition
 encodeListknown = [face_recognition.face_encodings(img)[0] for img in Images]

-if img_file_buffer is not None:
-    # Validate Aadhaar card number
-    if validate_aadhaar(aadhaar_number):
-        test_image = Image.open(img_file_buffer)
-        image = np.asarray(test_image)
-
-        imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)
-        imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
-        facesCurFrame = face_recognition.face_locations(imgS)
-        encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
-
-        name = "Unknown" # Default name for unknown faces
-        match_found = False # Flag to track if a match is found
-
-        # Checking if faces are detected
-        if len(encodesCurFrame) > 0:
-            for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
-                # Assuming that encodeListknown is defined and populated in your code
-                matches = face_recognition.compare_faces(encodeListknown, encodeFace)
-                faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
-                matchIndex = np.argmin(faceDis)
-
-                if matches[matchIndex]:
-                    name = classnames[matchIndex].upper()
-
-                    # Check if Aadhaar number is found in the database
-                    if aadhaar_number not in aadhar_numbers:
-                        st.error("Face recognized, but Aadhaar number not found in the database.")
-                    else:
-                        # Update data only if a known face is detected and Aadhaar number is valid
-                        update_data(name, aadhaar_number)
-                        match_found = True # Set the flag to True
-
-                else:
-                    # Face recognized, but not matched with Aadhaar number
-                    st.error("Face recognized, but Aadhaar number does not match.")
-
-                y1, x2, y2, x1 = faceLoc
-                y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
-                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
-                cv2.rectangle(image, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
-                cv2.putText(image, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
-
-            display_image_with_overlay(image, name)
-
-            # Display the name corresponding to the entered Aadhaar number
-            if not match_found:
-                # Match Aadhaar number with the list
-                aadhar_index = aadhar_numbers.index(aadhaar_number) if aadhaar_number in aadhar_numbers else None
-                if aadhar_index is not None:
-                    st.success(f"Match found: {classnames[aadhar_index]}")
-                else:
-                    st.warning("Face not detected, and Aadhaar number not found in the database.")
-            else:
-                st.success(f"Face recognized: {name}")

         else:
-            st.warning("No faces detected in the image. Face recognition failed.")
-
     else:
-        st.error("Invalid Aadhaar card number. Please enter a valid 6-digit Aadhaar number.")
 
+import streamlit as st
 import numpy as np
 import cv2
+from PIL import Image
 import requests
 import face_recognition
+from keras.models import load_model
 import os

 # Set page title and description
 st.set_page_config(
+    page_title="Face Recognition Attendance System With Emotion Detection",
     page_icon="📷",
     layout="centered",
     initial_sidebar_state="collapsed"
 )
+st.title("Attendance System Using Face Recognition and Emotion Detection 📷")
+st.markdown("This app recognizes faces in an image, detects emotions, and updates attendance records with the current timestamp.")

+# Load emotion detection model
+@st.cache_resource
+def load_emotion_model():
+    model = load_model('CNN_Model_acc_75.h5')
+    return model
+
+emotion_model = load_emotion_model()
+
+# Emotion labels
+emotion_labels = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
+
+# Load known faces and classnames
+Images = []
+classnames = []

 directory = "photos"
 myList = os.listdir(directory)

     curImg = cv2.imread(img_path)
     Images.append(curImg)
     classnames.append(os.path.splitext(cls)[0])

+# Function to update attendance data
+def update_data(name, emotion):
     url = "https://huggingface.glitch.me"
     url1 = "/update"

+    data = {'name': name, 'emotion': emotion}
+    try:
+        response = requests.post(url + url1, data=data)
+        if response.status_code == 200:
+            st.success("Attendance updated successfully!")
+        else:
+            st.warning("Failed to update attendance!")
+    except Exception as e:
+        st.error(f"Error updating attendance: {e}")

 # Function to display image with overlay
+def display_image_with_overlay(image, name, emotion):
+    cv2.putText(image, f"{name} is feeling {emotion}", (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
     st.image(image, use_column_width=True, output_format="PNG")

 # Load images for face recognition
 encodeListknown = [face_recognition.face_encodings(img)[0] for img in Images]

+# Upload image using the file uploader
+img_file_buffer = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

+if img_file_buffer is not None:
+    test_image = Image.open(img_file_buffer)
+    image = np.asarray(test_image)
+
+    imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)
+    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
+    facesCurFrame = face_recognition.face_locations(imgS)
+    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
+
+    name = "Unknown" # Default name for unknown faces
+    match_found = False # Flag to track if a match is found
+
+    # Emotion detection part
+    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+    emotion = "Neutral" # Default emotion
+
+    if len(encodesCurFrame) > 0:
+        for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
+            # Emotion detection
+            y1, x2, y2, x1 = faceLoc
+            roi = imgS[y1:y2, x1:x2]
+            roi = cv2.resize(roi, (48, 48)) # Resize to fit model
+            roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
+            roi = np.expand_dims(roi, axis=0) / 255.0 # Preprocess the image
+            emotion_predictions = emotion_model.predict(roi)
+            emotion = emotion_labels[np.argmax(emotion_predictions)]
+
+            # Face recognition logic
+            matches = face_recognition.compare_faces(encodeListknown, encodeFace)
+            faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
+            matchIndex = np.argmin(faceDis)
+
+            if matches[matchIndex]:
+                name = classnames[matchIndex].upper()
+                update_data(name, emotion)
+                match_found = True
+
+            y1, x2, y2, x1 = faceLoc
+            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
+            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+            cv2.rectangle(image, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
+            cv2.putText(image, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
+
+        display_image_with_overlay(image, name, emotion)
+
+        if match_found:
+            st.success(f"Face recognized: {name} and Emotion: {emotion}")
         else:
+            st.warning("Face not detected, or no match found in the database.")
     else:
+        st.warning("No faces detected in the image. Face recognition failed.")