import gradio as gr
import face_recognition
import cv2
import numpy as np
from PIL import Image
import pickle
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
from firebase_admin import storage

# Initialize Firebase
cred = credentials.Certificate("serviceAccountKey.json")  # Update with your credentials path
firebase_app = firebase_admin.initialize_app(cred, {
    'databaseURL': 'https://faceantendancerealtime-default-rtdb.firebaseio.com/',
    'storageBucket': 'faceantendancerealtime.appspot.com'
})
bucket = storage.bucket()

# Function to download face encodings from Firebase Storage
def download_encodings():
    blob = bucket.blob('EncodeFile.p')
    blob.download_to_filename('EncodeFile.p')
    with open('EncodeFile.p', 'rb') as file:
        return pickle.load(file)

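# Load the known encodings once at startup; the pickle is assumed to hold a
# two-element list, [known_face_encodings, student_ids], produced offline.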
encodeListKnownWithIds = download_encodings()
encodeListKnown, studentsIds = encodeListKnownWithIds

def recognize_face(input_image):
    # Convert the PIL image to a numpy array. face_recognition expects RGB,
    # so keep the RGB array for detection and a BGR copy for OpenCV drawing.
    rgb_img = np.array(input_image)
    img = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGR)

    # Detect faces and compute their encodings
    face_locations = face_recognition.face_locations(rgb_img)
    face_encodings = face_recognition.face_encodings(rgb_img, face_locations)

    # Reference to the Students node in the Realtime Database
    ref = db.reference('Students')

    # Match each detected face against the known encodings and fetch its record
    results = []
    for face_encoding in face_encodings:
        matches = face_recognition.compare_faces(encodeListKnown, face_encoding)
        face_distances = face_recognition.face_distance(encodeListKnown, face_encoding)
        best_match_index = np.argmin(face_distances)

        student_info = {'name': 'Unknown'}
        if matches[best_match_index]:
            student_id = studentsIds[best_match_index]
            record = ref.child(str(student_id)).get()
            if record:
                student_info = record
        # Append one entry per face so results stays aligned with face_locations
        results.append(student_info)

    # Draw a labelled rectangle around each detected face
    for (top, right, bottom, left), student_info in zip(face_locations, results):
        name = student_info.get('name', 'Unknown')
        cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.putText(img, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255), 1)

    # Convert back to a PIL Image for Gradio
    pil_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    return pil_img, results


# Gradio interface
iface = gr.Interface(
    fn=recognize_face,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Image(type="pil"), gr.JSON(label="Student Information")],
    title="Face Recognition Attendance System",
    description="Upload an image to identify individuals."
)

if __name__ == "__main__":
    iface.launch(debug=True, inline=False)