ombhojane committed (verified)
Commit 718a03f · 1 Parent(s): a56aec5

Update app.py

Files changed (1): app.py (+159 -150)
app.py CHANGED
@@ -1,150 +1,159 @@
- from unittest import result
- from tensorflow.keras.applications.vgg16 import preprocess_input
- from keras_vggface.vggface import VGGFace
- from src.utils.all_utils import read_yaml, create_dir
- import pickle
- from sklearn.metrics.pairwise import cosine_similarity
- import streamlit as st
- from PIL import Image
- import os
- import cv2
- from mtcnn import MTCNN
- import numpy as np
- import base64
- from static.load_css import local_css
-
-
- local_css("static/style.css")
-
- def get_base64(bin_file):
-     with open(bin_file, 'rb') as f:
-         data = f.read()
-     return base64.b64encode(data).decode()
-
- def set_background(png_file):
-     bin_str = get_base64(png_file)
-     page_bg_img = '''
-     <style>
-     body {
-     background-image: url("data:image/png;base64,%s");
-     background-size: cover;
-     }
-     </style>
-     ''' % bin_str
-     st.markdown(page_bg_img, unsafe_allow_html=True)
-
- set_background('artifacts/images.jpg')
-
-
- config = read_yaml("config/config.yaml")
- params = read_yaml('params.yaml')
-
- artifacts = config['artifacts']
- artifacts_dirs = artifacts['artifacts_dir']
-
- upload_image_dir = artifacts['upload_image_dir']
- upload_path = os.path.join(artifacts_dirs, upload_image_dir)
-
- pickle_format_dirs = artifacts['pickle_format_data_dir']
- img_pickle_file_name = artifacts['img_pickle_file_name']
- pickle_actor_name = artifacts['pickle_actor_names']
-
- pickle_dir_path = os.path.join(artifacts_dirs, pickle_format_dirs)
- pickle_file = os.path.join(pickle_dir_path, img_pickle_file_name)
- pickle_actor = os.path.join(pickle_dir_path, pickle_actor_name)
-
- feature_extractor_dir = artifacts['feature_extraction_dir']
- extracted_feature_name = artifacts['extracted_features_name']
-
- feature_extractor_path = os.path.join(artifacts_dirs, feature_extractor_dir)
- feature_name = os.path.join(feature_extractor_path, extracted_feature_name)
-
- data = params['base']['data_path']
-
- model_name = params['base']['BASE_MODEL']
- include_top = params['base']['include_top']
- pooling = params['base']['pooling']
-
- detector = MTCNN()
- model = VGGFace(model= model_name, include_top=include_top, input_shape=(244,244,3), pooling=pooling)
-
- filenames = pickle.load(open(pickle_file, 'rb'))
- feature_list = pickle.load(open(feature_name, 'rb'))
- actor_names = pickle.load(open(pickle_actor, 'rb'))
-
- def extracted_features(img_path, model, detector):
-     img = cv2.imread(img_path)
-     result = detector.detect_faces(img)
-
-     x,y,width, heigth = result[0]['box']
-     face = img[y:y+heigth, x:x+width]
-     image = Image.fromarray(face)
-     image= image.resize((244,244))
-
-     face_array = np.asarray(image)
-     face_array = face_array.astype('float32')
-
-     expanded_img = np.expand_dims(face_array, axis= 0)
-     preprocess_img = preprocess_input(expanded_img)
-     result= model.predict(preprocess_img).flatten()
-
-     return result
-
- def recommed(feature_list, features):
-     similarity = []
-     for i in range(len(feature_list)):
-         similarity.append(cosine_similarity(features.reshape(1,-1), feature_list[i].reshape(1,-1))[0][0])
-
-
-     result = sorted(list(enumerate(similarity)), reverse=True, key=lambda x: x[1])[0]
-     index_pos = result[0]
-     percentage = result[1]
-
-     return index_pos, percentage
-
- def save_upload_image(upload_image):
-     try:
-         create_dir([upload_path])
-         with open(os.path.join(upload_path, upload_image.name), 'wb') as f:
-             f.write(upload_image.getbuffer())
-         return True
-     except:
-         return False
-
- main_title = "<center><div><p class='highlight grey' style='font-size:47px'><span class='bold'>Guess your look alike celebrity</span></span></div></center>"
- st.markdown(main_title, unsafe_allow_html=True)
-
-
- uploaded_image = st.file_uploader('Choose a image')
- if uploaded_image is not None:
-     if save_upload_image(uploaded_image):
-
-         display_image = Image.open(uploaded_image)
-         resized_display_img = display_image.resize((260,320), Image.ANTIALIAS)
-
-
-         upload_image_path = os.path.join(upload_path, uploaded_image.name)
-         features = extracted_features(upload_image_path, model, detector)
-         img_path, percentage = recommed(feature_list, features)
-         actor_path = filenames[img_path]
-
-
-         predicted_actor = " ".join(actor_path.split('\\')[2].split('_'))
-
-         actor_root_name = actor_path.split('\\')[2]
-         pred_actor_path = os.path.join(data,actor_root_name, '1.jpg' )
-
-         pred_actor_image = Image.open(pred_actor_path)
-         resized_actor_img = pred_actor_image.resize((260,320), Image.ANTIALIAS)
-
-         st.header(f'You look like {predicted_actor} with {np.round(percentage*100,0)}% similarity')
-
-         col1, col2 = st.beta_columns(2)
-
-         with col1:
-             st.markdown("Thats you")
-             st.image(resized_display_img)
-
-         with col2:
-             st.markdown("Your look alike celelb")
-             st.image(resized_actor_img)
+ from unittest import result
+ from tensorflow.keras.applications.vgg16 import preprocess_input
+ from tensorflow.python.keras.utils.layer_utils import get_source_inputs
+ from tensorflow.python.keras.layers import (
+     Flatten, Dense, Input, GlobalAveragePooling2D,
+     GlobalMaxPooling2D, Activation, Conv2D, MaxPooling2D,
+     BatchNormalization, AveragePooling2D, Reshape, Permute, multiply
+ )
+ from tensorflow.python.keras.models import Model
+ from tensorflow.python.keras import backend as K
+ from tensorflow.python.keras.utils.data_utils import get_file
+ from keras_vggface.vggface import VGGFace
+ from src.utils.all_utils import read_yaml, create_dir
+ import pickle
+ from sklearn.metrics.pairwise import cosine_similarity
+ import streamlit as st
+ from PIL import Image
+ import os
+ import cv2
+ from mtcnn import MTCNN
+ import numpy as np
+ import base64
+ from static.load_css import local_css
+
+
+ local_css("static/style.css")
+
+ def get_base64(bin_file):
+     with open(bin_file, 'rb') as f:
+         data = f.read()
+     return base64.b64encode(data).decode()
+
+ def set_background(png_file):
+     bin_str = get_base64(png_file)
+     page_bg_img = '''
+     <style>
+     body {
+     background-image: url("data:image/png;base64,%s");
+     background-size: cover;
+     }
+     </style>
+     ''' % bin_str
+     st.markdown(page_bg_img, unsafe_allow_html=True)
+
+ set_background('artifacts/images.jpg')
+
+
+ config = read_yaml("config/config.yaml")
+ params = read_yaml('params.yaml')
+
+ artifacts = config['artifacts']
+ artifacts_dirs = artifacts['artifacts_dir']
+
+ upload_image_dir = artifacts['upload_image_dir']
+ upload_path = os.path.join(artifacts_dirs, upload_image_dir)
+
+ pickle_format_dirs = artifacts['pickle_format_data_dir']
+ img_pickle_file_name = artifacts['img_pickle_file_name']
+ pickle_actor_name = artifacts['pickle_actor_names']
+
+ pickle_dir_path = os.path.join(artifacts_dirs, pickle_format_dirs)
+ pickle_file = os.path.join(pickle_dir_path, img_pickle_file_name)
+ pickle_actor = os.path.join(pickle_dir_path, pickle_actor_name)
+
+ feature_extractor_dir = artifacts['feature_extraction_dir']
+ extracted_feature_name = artifacts['extracted_features_name']
+
+ feature_extractor_path = os.path.join(artifacts_dirs, feature_extractor_dir)
+ feature_name = os.path.join(feature_extractor_path, extracted_feature_name)
+
+ data = params['base']['data_path']
+
+ model_name = params['base']['BASE_MODEL']
+ include_top = params['base']['include_top']
+ pooling = params['base']['pooling']
+
+ detector = MTCNN()
+ model = VGGFace(model= model_name, include_top=include_top, input_shape=(244,244,3), pooling=pooling)
+
+ filenames = pickle.load(open(pickle_file, 'rb'))
+ feature_list = pickle.load(open(feature_name, 'rb'))
+ actor_names = pickle.load(open(pickle_actor, 'rb'))
+
+ def extracted_features(img_path, model, detector):
+     img = cv2.imread(img_path)
+     result = detector.detect_faces(img)
+
+     x,y,width, heigth = result[0]['box']
+     face = img[y:y+heigth, x:x+width]
+     image = Image.fromarray(face)
+     image= image.resize((244,244))
+
+     face_array = np.asarray(image)
+     face_array = face_array.astype('float32')
+
+     expanded_img = np.expand_dims(face_array, axis= 0)
+     preprocess_img = preprocess_input(expanded_img)
+     result= model.predict(preprocess_img).flatten()
+
+     return result
+
+ def recommed(feature_list, features):
+     similarity = []
+     for i in range(len(feature_list)):
+         similarity.append(cosine_similarity(features.reshape(1,-1), feature_list[i].reshape(1,-1))[0][0])
+
+
+     result = sorted(list(enumerate(similarity)), reverse=True, key=lambda x: x[1])[0]
+     index_pos = result[0]
+     percentage = result[1]
+
+     return index_pos, percentage
+
+ def save_upload_image(upload_image):
+     try:
+         create_dir([upload_path])
+         with open(os.path.join(upload_path, upload_image.name), 'wb') as f:
+             f.write(upload_image.getbuffer())
+         return True
+     except:
+         return False
+
+ main_title = "<center><div><p class='highlight grey' style='font-size:47px'><span class='bold'>Guess your look alike celebrity</span></span></div></center>"
+ st.markdown(main_title, unsafe_allow_html=True)
+
+
+ uploaded_image = st.file_uploader('Choose a image')
+ if uploaded_image is not None:
+     if save_upload_image(uploaded_image):
+
+         display_image = Image.open(uploaded_image)
+         resized_display_img = display_image.resize((260,320), Image.ANTIALIAS)
+
+
+         upload_image_path = os.path.join(upload_path, uploaded_image.name)
+         features = extracted_features(upload_image_path, model, detector)
+         img_path, percentage = recommed(feature_list, features)
+         actor_path = filenames[img_path]
+
+
+         predicted_actor = " ".join(actor_path.split('\\')[2].split('_'))
+
+         actor_root_name = actor_path.split('\\')[2]
+         pred_actor_path = os.path.join(data,actor_root_name, '1.jpg' )
+
+         pred_actor_image = Image.open(pred_actor_path)
+         resized_actor_img = pred_actor_image.resize((260,320), Image.ANTIALIAS)
+
+         st.header(f'You look like {predicted_actor} with {np.round(percentage*100,0)}% similarity')
+
+         col1, col2 = st.beta_columns(2)
+
+         with col1:
+             st.markdown("Thats you")
+             st.image(resized_display_img)
+
+         with col2:
+             st.markdown("Your look alike celelb")
+             st.image(resized_actor_img)
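
Note (not part of the commit): the recommed() helper above scores the query face against each stored embedding one at a time in a Python loop. As an illustration only, here is a minimal vectorised sketch of the same cosine-similarity ranking using a single sklearn call; the feature_list and features names follow the app's code, and the assumption that each stored feature is a 1-D embedding vector matches how extracted_features() flattens the model output.

# Illustrative sketch, not part of app.py or this commit.
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

def recommend_vectorized(feature_list, features):
    # Stack all stored embeddings into one (n_samples, n_features) matrix.
    matrix = np.vstack([np.asarray(f).reshape(1, -1) for f in feature_list])
    # Score the query embedding against every stored embedding in one call.
    scores = cosine_similarity(np.asarray(features).reshape(1, -1), matrix)[0]
    index_pos = int(np.argmax(scores))     # index into filenames, as in recommed()
    percentage = float(scores[index_pos])  # cosine similarity in [-1, 1]
    return index_pos, percentage

This returns the same (index, score) pair that recommed() produces, so it could serve as a drop-in replacement if the per-item loop ever became a bottleneck.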