Spaces:
Sleeping
Update app.py
app.py
CHANGED
@@ -1,150 +1,159 @@
(the previous 150 lines of app.py are truncated in this diff view; the updated file follows)
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.python.keras.utils.layer_utils import get_source_inputs
from tensorflow.python.keras.layers import (
    Flatten, Dense, Input, GlobalAveragePooling2D,
    GlobalMaxPooling2D, Activation, Conv2D, MaxPooling2D,
    BatchNormalization, AveragePooling2D, Reshape, Permute, multiply
)
from tensorflow.python.keras.models import Model
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.data_utils import get_file
from keras_vggface.vggface import VGGFace
from src.utils.all_utils import read_yaml, create_dir
import pickle
from sklearn.metrics.pairwise import cosine_similarity
import streamlit as st
from PIL import Image
import os
import cv2
from mtcnn import MTCNN
import numpy as np
import base64
from static.load_css import local_css


local_css("static/style.css")

def get_base64(bin_file):
    # Read a binary file and return its base64-encoded string.
    with open(bin_file, 'rb') as f:
        data = f.read()
    return base64.b64encode(data).decode()

def set_background(png_file):
    # Inject CSS that uses the given image as the page background.
    bin_str = get_base64(png_file)
    page_bg_img = '''
    <style>
    body {
    background-image: url("data:image/png;base64,%s");
    background-size: cover;
    }
    </style>
    ''' % bin_str
    st.markdown(page_bg_img, unsafe_allow_html=True)

set_background('artifacts/images.jpg')


# Load pipeline configuration and parameters.
config = read_yaml("config/config.yaml")
params = read_yaml('params.yaml')

artifacts = config['artifacts']
artifacts_dirs = artifacts['artifacts_dir']

# Directory where uploaded images are saved.
upload_image_dir = artifacts['upload_image_dir']
upload_path = os.path.join(artifacts_dirs, upload_image_dir)

# Pickled image paths and actor names from the data-preparation stage.
pickle_format_dirs = artifacts['pickle_format_data_dir']
img_pickle_file_name = artifacts['img_pickle_file_name']
pickle_actor_name = artifacts['pickle_actor_names']

pickle_dir_path = os.path.join(artifacts_dirs, pickle_format_dirs)
pickle_file = os.path.join(pickle_dir_path, img_pickle_file_name)
pickle_actor = os.path.join(pickle_dir_path, pickle_actor_name)

# Pre-computed face embeddings from the feature-extraction stage.
feature_extractor_dir = artifacts['feature_extraction_dir']
extracted_feature_name = artifacts['extracted_features_name']

feature_extractor_path = os.path.join(artifacts_dirs, feature_extractor_dir)
feature_name = os.path.join(feature_extractor_path, extracted_feature_name)

data = params['base']['data_path']

model_name = params['base']['BASE_MODEL']
include_top = params['base']['include_top']
pooling = params['base']['pooling']

# Face detector and face-embedding model.
detector = MTCNN()
model = VGGFace(model=model_name, include_top=include_top, input_shape=(244, 244, 3), pooling=pooling)

# Load the pre-computed artifacts.
filenames = pickle.load(open(pickle_file, 'rb'))
feature_list = pickle.load(open(feature_name, 'rb'))
actor_names = pickle.load(open(pickle_actor, 'rb'))

def extracted_features(img_path, model, detector):
    # Detect the face in the image, crop it, and return its VGGFace embedding.
    img = cv2.imread(img_path)
    result = detector.detect_faces(img)

    x, y, width, height = result[0]['box']
    face = img[y:y + height, x:x + width]
    image = Image.fromarray(face)
    image = image.resize((244, 244))

    face_array = np.asarray(image)
    face_array = face_array.astype('float32')

    expanded_img = np.expand_dims(face_array, axis=0)
    preprocess_img = preprocess_input(expanded_img)
    result = model.predict(preprocess_img).flatten()

    return result

def recommend(feature_list, features):
    # Compare the uploaded face against every stored embedding and
    # return the index and cosine similarity of the best match.
    similarity = []
    for i in range(len(feature_list)):
        similarity.append(cosine_similarity(features.reshape(1, -1), feature_list[i].reshape(1, -1))[0][0])

    result = sorted(list(enumerate(similarity)), reverse=True, key=lambda x: x[1])[0]
    index_pos = result[0]
    percentage = result[1]

    return index_pos, percentage

def save_upload_image(upload_image):
    # Persist the uploaded file so it can be read back from disk.
    try:
        create_dir([upload_path])
        with open(os.path.join(upload_path, upload_image.name), 'wb') as f:
            f.write(upload_image.getbuffer())
        return True
    except Exception:
        return False

main_title = "<center><div><p class='highlight grey' style='font-size:47px'><span class='bold'>Guess your look alike celebrity</span></p></div></center>"
st.markdown(main_title, unsafe_allow_html=True)


uploaded_image = st.file_uploader('Choose an image')
if uploaded_image is not None:
    if save_upload_image(uploaded_image):

        display_image = Image.open(uploaded_image)
        resized_display_img = display_image.resize((260, 320), Image.ANTIALIAS)

        # Embed the uploaded face and find the closest stored celebrity embedding.
        upload_image_path = os.path.join(upload_path, uploaded_image.name)
        features = extracted_features(upload_image_path, model, detector)
        img_path, percentage = recommend(feature_list, features)
        actor_path = filenames[img_path]

        # Stored paths use Windows-style separators; the third component is the actor's folder name.
        predicted_actor = " ".join(actor_path.split('\\')[2].split('_'))

        actor_root_name = actor_path.split('\\')[2]
        pred_actor_path = os.path.join(data, actor_root_name, '1.jpg')

        pred_actor_image = Image.open(pred_actor_path)
        resized_actor_img = pred_actor_image.resize((260, 320), Image.ANTIALIAS)

        st.header(f'You look like {predicted_actor} with {np.round(percentage * 100, 0)}% similarity')

        col1, col2 = st.beta_columns(2)

        with col1:
            st.markdown("That's you")
            st.image(resized_display_img)

        with col2:
            st.markdown("Your look alike celeb")
            st.image(resized_actor_img)
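
The updated app.py imports read_yaml and create_dir from src/utils/all_utils and expects config/config.yaml and params.yaml to expose the keys it reads. Neither file is part of this change, so the sketch below is hypothetical: a minimal version of the two helpers and of the key layout, inferred only from how app.py calls them; the placeholder values are not the project's real ones.

# Hypothetical src/utils/all_utils.py — not part of this commit; a minimal
# sketch assuming read_yaml() returns a dict and create_dir() takes a list of paths.
import os
import yaml


def read_yaml(path_to_yaml: str) -> dict:
    # Load a YAML file and return its contents as a dict.
    with open(path_to_yaml) as yaml_file:
        return yaml.safe_load(yaml_file)


def create_dir(dirs: list) -> None:
    # Create each directory in the list if it does not already exist.
    for dir_path in dirs:
        os.makedirs(dir_path, exist_ok=True)


# Keys app.py reads (values are placeholders, inferred from the code above):
#
# config/config.yaml
#   artifacts:
#     artifacts_dir: artifacts
#     upload_image_dir: upload_images
#     pickle_format_data_dir: pickle_format_data
#     img_pickle_file_name: img_pickle_file.pkl
#     pickle_actor_names: actor_names.pkl
#     feature_extraction_dir: feature_extraction
#     extracted_features_name: extracted_features.pkl
#
# params.yaml
#   base:
#     data_path: data
#     BASE_MODEL: resnet50
#     include_top: false
#     pooling: avg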