MrSinan committed on
Commit
4785a31
·
1 Parent(s): 5eff866

Upload app.py

Files changed (1)
  1. app.py +334 -0
app.py ADDED
@@ -0,0 +1,334 @@
+ # -*- coding: utf-8 -*-
+ """app.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+ https://colab.research.google.com/drive/1GWMyMjaydEM_30nRtu1W_B2eaTWLCCuN
+
+ # T1
+ """
+
+ import glob
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import tensorflow as tf
+ from tensorflow import keras
+ from tensorflow.keras import Model, layers
+ from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Flatten
+ from PIL import Image
+ from mtcnn.mtcnn import MTCNN
+ import gradio as gr
+
+ from mask_the_face import *
+
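+ # Collect one list of image paths per identity from the LFW-deepfunneled
+ # layout (one sub-directory per person, each holding that person's .jpg files).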
+ def get_paths():
+     classes = []
+     for file in sorted(glob.iglob('./lfw-deepfunneled/*/')):
+         classes.append(file)
+     for i, d in enumerate(classes):
+         paths = d + '*.jpg'
+         class_ = []
+         for file in sorted(glob.iglob(paths)):
+             class_.append(file)
+         classes[i] = class_
+     return classes
+
+ classLabels = np.load('classLabels.npy')
+
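+ # Detect the first face with MTCNN and crop it to the 224x224 input the
+ # models expect; if no face is found, resize the whole frame instead.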
+ def extract_face(photo, required_size=(224, 224)):
+     # load image from file
+     pixels = photo
+     print(pixels.shape)
+     # bail out if the image is not a 3-channel RGB array
+     if pixels.shape[-1] != 3:
+         return Image.fromarray(pixels)
+
+     # create the detector, using default weights
+     detector = MTCNN()
+     # detect faces in the image
+     results = detector.detect_faces(pixels)
+     if not results:
+         image = Image.fromarray(pixels)
+         image = image.resize(required_size)
+         print('not cropped')
+         return image
+     # extract the bounding box from the first face
+     print('cropped')
+     x1, y1, width, height = results[0]['box']
+     x2, y2 = x1 + width, y1 + height
+
+     face = pixels[int(y1):int(y2), int(x1):int(x2)]
+     # resize pixels to the model size
+     image = Image.fromarray(face)
+     image = image.resize(required_size)
+
+     return image
+
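+ # Transfer-learned FaceNet classifier: all but the last 50 layers of the
+ # pretrained backbone are frozen, and a new Dropout/BatchNorm/softmax head
+ # over the 5,750 LFW identity classes is stacked on top.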
+ class FaceNet:
+     def __init__(self, loading_path, facenet_path):
+         self.loading = loading_path
+         self.modelPath = facenet_path
+         self.data_augmentation = keras.Sequential(
+             [layers.Rescaling(scale=1./127.5, offset=-1), layers.Resizing(160, 160)],
+             name="data_augmentation",
+         )
+         self.Facenet = self.transfer_facenet_model_with_normalization()
+         self.Facenet.load_weights(self.loading)
+
+     def transfer_facenet_model_with_normalization(self):
+         facenetmodel = tf.keras.models.load_model(self.modelPath)
+         # facenetmodel.load_weights('/content/drive/MyDrive/FaceNet/facenet_keras_weights.h5')
+         # freeze all but the last 50 layers of the pretrained backbone
+         for layer in facenetmodel.layers[:-50]:
+             layer.trainable = False
+         inputs = layers.Input(shape=(224, 224, 3))
+         # Rescale and resize the input to what FaceNet expects.
+         augmented = self.data_augmentation(inputs)
+         # This is 'bootstrapping' a new top model onto the pretrained layers.
+         top_model = facenetmodel(augmented)
+         top_model = Dropout(0.5)(top_model)
+         top_model = BatchNormalization()(top_model)
+         top_model = Flatten(name="flatten")(top_model)
+         output_layer = Dense(5750, activation='softmax')(top_model)
+
+         # Group the convolutional base and new fully-connected layers into a Model object.
+         model = Model(inputs=inputs, outputs=output_layer)
+         return model
+
+     def predict(self, testsSamples):
+         predictionProbabilty = self.Facenet.predict(testsSamples)
+         return predictionProbabilty
+
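+ # ViT building blocks: PatchEncoder projects each flattened patch and adds a
+ # learned position embedding; Patches splits an image into non-overlapping
+ # patch_size x patch_size tiles.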
+ class PatchEncoder(layers.Layer):
+     def __init__(self, num_patches, projection_dim):
+         super().__init__()
+         self.num_patches = num_patches
+         self.projection = layers.Dense(units=projection_dim)
+         self.position_embedding = layers.Embedding(
+             input_dim=num_patches, output_dim=projection_dim
+         )
+
+     def call(self, patch):
+         positions = tf.range(start=0, limit=self.num_patches, delta=1)
+         encoded = self.projection(patch) + self.position_embedding(positions)
+         return encoded
+
+ class Patches(layers.Layer):
+     def __init__(self, patch_size):
+         super().__init__()
+         self.patch_size = patch_size
+
+     def call(self, images):
+         batch_size = tf.shape(images)[0]
+         patches = tf.image.extract_patches(
+             images=images,
+             sizes=[1, self.patch_size, self.patch_size, 1],
+             strides=[1, self.patch_size, self.patch_size, 1],
+             rates=[1, 1, 1, 1],
+             padding="VALID",
+         )
+         patch_dims = patches.shape[-1]
+         patches = tf.reshape(patches, [batch_size, -1, patch_dims])
+         return patches
+
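+ # Vision Transformer classifier: 72x72 inputs are cut into 6x6 patches
+ # (144 tokens) and passed through 10 pre-norm attention/MLP blocks before an
+ # MLP head predicts one of the 5,750 identity classes.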
+ class Transformer:
+     def __init__(self, loading_path):
+         self.learning_rate = 0.001
+         self.weight_decay = 0.0001
+         self.batch_size = 32
+         self.num_epochs = 300
+         self.image_size = 72
+         self.patch_size = 6  # size of the patches to be extracted from the input images
+         self.num_patches = (self.image_size // self.patch_size) ** 2
+         self.projection_dim = 64
+         self.num_heads = 8
+         self.transformer_units = [self.projection_dim * 2, self.projection_dim]  # size of the transformer layers
+         self.transformer_layers = 10
+         self.mlp_head_units = [2048, 1024]  # size of the dense layers of the final classifier
+         self.loading = loading_path
+         self.data_augmentation = keras.Sequential(
+             [layers.Rescaling(1./255), layers.Resizing(self.image_size, self.image_size), layers.RandomFlip("horizontal")],
+             name="data_augmentation",
+         )
+         self.transformer = self.create_vit_classifier()
+         self.transformer.load_weights(self.loading)
+
+     def mlp(self, x, hidden_units, dropout_rate):
+         for units in hidden_units:
+             x = layers.Dense(units, activation=tf.nn.gelu)(x)
+             x = layers.Dropout(dropout_rate)(x)
+         return x
+
+     def create_vit_classifier(self):
+         inputs = layers.Input(shape=(224, 224, 3))
+
+         augmented = self.data_augmentation(inputs)
+         patches = Patches(self.patch_size)(augmented)
+         encoded_patches = PatchEncoder(self.num_patches, self.projection_dim)(patches)
+
+         for _ in range(self.transformer_layers):
+             # Pre-norm multi-head self-attention with a residual connection.
+             x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
+             attention_output = layers.MultiHeadAttention(num_heads=self.num_heads, key_dim=self.projection_dim, dropout=0.3)(x1, x1)
+             x2 = layers.Add()([attention_output, encoded_patches])
+             # Pre-norm MLP block with a second residual connection.
+             x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
+             x3 = self.mlp(x3, hidden_units=self.transformer_units, dropout_rate=0.3)
+             encoded_patches = layers.Add()([x3, x2])
+
+         representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
+         representation = layers.Flatten()(representation)
+         representation = layers.Dropout(0.6)(representation)
+         features = self.mlp(representation, hidden_units=self.mlp_head_units, dropout_rate=0.6)
+         logits = layers.Dense(5750, activation='softmax')(features)
+         model = keras.Model(inputs=inputs, outputs=logits)
+
+         return model
+
+     def predict(self, testsSamples):
+         predictionProbabilty = self.transformer.predict(testsSamples)
+         return predictionProbabilty
+
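+ # Soft-voting ensemble: the four softmax vectors are summed and the argmax of
+ # the sum wins. For example, two models scoring [0.6, 0.4] and two scoring
+ # [0.3, 0.7] sum to [1.8, 2.2], so class 1 is chosen. The top-5 summed scores
+ # are also normalized into percentages for display.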
+ class EnsembleModel:
+     def __init__(self, classLabels, model1, model2, model3, model4):
+         self.labels = classLabels
+         self.model1 = model1
+         self.model2 = model2
+         self.model3 = model3
+         self.model4 = model4
+
+     def predict(self, testSample):
+         pred_prob1 = self.model1.predict(testSample)
+         pred_prob2 = self.model2.predict(testSample)
+         pred_prob3 = self.model3.predict(testSample)
+         pred_prob4 = self.model4.predict(testSample)
+         pred_sum = pred_prob1 + pred_prob2 + pred_prob3 + pred_prob4
+         print(pred_sum.shape)
+         preds_classes_sum = np.argmax(pred_sum, axis=-1)
+         total = sum(pred_sum[0])
+         print(total)
+         percentages = np.asarray([x / total for x in pred_sum[0]])
+         # indices of the five highest summed scores, lowest to highest
+         idx = np.argsort(pred_sum, axis=1)[:, -5:]
+         print(pred_sum[0][idx])
+         print(percentages[idx])
+         # flip so the top-5 labels and percentages come highest first
+         return self.labels[preds_classes_sum][0], np.flip(self.labels[idx]), np.flip(percentages[idx])
+
+ """# Test"""
+
+ faceModel1 = FaceNet('./Models/MyEn3facenet.h5', './Models/facenet_keras.h5')
+ faceModel2 = FaceNet('./Models/MyEn4facenet.h5', './Models/facenet_keras.h5')
+ transformerModel1 = Transformer('./Models/FirstTransformer3Ensamble1.h5')
+ transformerModel2 = Transformer('./Models/FirstTransformer3Ensamble2.h5')
+
+ Ensemble = EnsembleModel(classLabels, faceModel1, faceModel2, transformerModel1, transformerModel2)
+
+ classes = get_paths()
+ OneList = list(np.concatenate(classes))
+
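+ # Helper for previewing images in a grid during debugging (not called by the UI).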
+ def grid_display(list_of_images, list_of_titles=[], no_of_columns=2, figsize=(10, 10)):
+     fig = plt.figure(figsize=figsize)
+     column = 0
+     for i in range(len(list_of_images)):
+         column += 1
+         # check for end of column and create a new figure
+         if column == no_of_columns + 1:
+             fig = plt.figure(figsize=figsize)
+             column = 1
+         fig.add_subplot(1, no_of_columns, column)
+         plt.imshow(list_of_images[i])
+         plt.axis('off')
+         if len(list_of_titles) >= len(list_of_images):
+             plt.title(list_of_titles[i])
+
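+ # End-to-end inference: optionally mask the face with mask_the_face, crop it
+ # with MTCNN, then return the ensemble's top-5 scores plus the cropped image.
+ # A minimal usage sketch outside Gradio (assuming a hypothetical 'test.jpg'):
+ #   img = np.array(Image.open('test.jpg').convert('RGB'))
+ #   scores, crop = recognition_pipeline(img, mask=False)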
+ def recognition_pipeline(img, mask):
+     im = Image.fromarray(img.astype('uint8'), 'RGB')
+     im = np.array(im)
+     im2 = im[:, :, ::-1].copy()  # RGB -> BGR
+
+     if mask:
+         im2 = maskThisImages(im2)
+         if len(im2) == 0:
+             # masking failed; fall back to the unmasked image
+             im2 = im.copy()
+             im2 = im2[:, :, ::-1]
+
+     im2 = im2[:, :, ::-1]  # back to RGB
+     temp = extract_face(im2)
+     cropped = np.array(temp)
+     open_cv_image = cropped[:, :, ::-1].copy()
+
+     prediction, top5, percentage = Ensemble.predict(open_cv_image[None, ...])
+     return dict(zip(np.reshape(top5, -1), np.reshape(percentage, -1))), cropped
+
+ with gr.Blocks() as demo:
+     gr.HTML(
+         """
+         <div style="text-align: center; max-width: 650px; margin: 0 auto;">
+           <div
+             style="
+               display: inline-flex;
+               align-items: center;
+               gap: 0.8rem;
+               font-size: 1.75rem;
+             "
+           >
+             <h1 style="font-weight: 900; margin-bottom: 7px;">
+               LFW-Masked Recognition
+             </h1>
+           </div>
+           <p style="margin-bottom: 10px; font-size: 94%">
+             An AI model built with ensemble learning, combining a transformer
+             and FaceNet to recognize the celebrity classes in the LFW dataset
+             (5,700+ classes).
+           </p>
+         </div>
+         """
+     )
+     with gr.Row():
+         with gr.Column():
+             imagein = gr.Image(label='User-Input', interactive=True)
+
+     # with gr.Column():
+     #     gr.Examples(['1.jpg', '2.jpg', '3.jpg'], inputs=imagein)
+     with gr.Row():
+         checkbox = gr.Checkbox(label='Mask the face')
+         image_button = gr.Button("Submit")
+
+     with gr.Row():
+         mOut = gr.Image(type='numpy', label='(Model-input)')
+         label = gr.Label(num_top_classes=5)
+
+     gr.Markdown("<p style='text-align: center'>Made with 🖤 by Mohammed & Aseel</p>")
+     image_button.click(fn=recognition_pipeline, inputs=[imagein, checkbox], outputs=[label, mOut])
+ demo.launch()