Upload 3 files
- services/face_match.py +44 -0
- services/facial_processing.py +67 -0
- services/recommendation_service.py +97 -0
services/face_match.py
ADDED
@@ -0,0 +1,44 @@
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from sqlalchemy.orm import Session
from users.models import UserEmbeddings


class FaceMatch:
    def __init__(self, db: Session):
        self.db = db

    def load_embeddings_from_db(self):
        user_embeddings = self.db.query(UserEmbeddings).all()
        return {ue.user_id: np.array(ue.embeddings) for ue in user_embeddings}

    def match_faces(self, new_embeddings, saved_embeddings, threshold=0.6):
        new_embeddings = np.array(new_embeddings)
        max_similarity = 0
        identity = None

        for user_id, stored_embeddings in saved_embeddings.items():
            similarity = cosine_similarity(new_embeddings.reshape(1, -1),
                                           stored_embeddings.reshape(1, -1))[0][0]
            if similarity > max_similarity:
                max_similarity = similarity
                identity = user_id

        # The parentheses matter: without them the conditional expression binds
        # only to max_similarity, so a below-threshold match would still leak
        # the user_id as (user_id, (None, 0)).
        return (identity, max_similarity) if max_similarity > threshold else (None, 0)

    def new_face_matching(self, new_embeddings):
        embeddings_dict = self.load_embeddings_from_db()
        if not embeddings_dict:
            return {'status': 'Error', 'message': 'No embeddings available'}

        identity, similarity = self.match_faces(new_embeddings, embeddings_dict)
        if identity:
            return {
                'status': 'Success',
                'message': 'Match Found',
                'user_id': identity,
                'similarity': similarity
            }
        return {
            'status': 'Error',
            'message': 'No matching face found'
        }
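A minimal usage sketch for FaceMatch. The engine URL, SessionLocal factory, and the probe vector are assumptions, not part of this upload; in practice the probe embedding would come from FacialProcessing below, and its dimensionality must match what UserEmbeddings stores (e.g. the 128-d OpenFace vectors):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///app.db')  # placeholder connection URL
SessionLocal = sessionmaker(bind=engine)

db = SessionLocal()
matcher = FaceMatch(db)
# Placeholder probe; a real one would come from FacialProcessing.extract_embeddings
result = matcher.new_face_matching([0.1] * 128)
print(result)  # e.g. {'status': 'Success', 'message': 'Match Found', ...}
db.close()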
services/facial_processing.py
ADDED
@@ -0,0 +1,67 @@
import logging
import os

import cv2
import torch
from facenet_pytorch import MTCNN, InceptionResnetV1

logger = logging.getLogger(__name__)


class FacialProcessing:
    def __init__(self):
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        self.model = cv2.dnn.readNetFromTorch('openface.nn4.small2.v1.t7')

        # Point the torch cache at a writable location before the
        # facenet-pytorch weights are downloaded
        os.environ['TORCH_HOME'] = '/tmp/.cache/torch'

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        self.mtcnn = MTCNN(keep_all=True, device=self.device)
        self.resnet = InceptionResnetV1(pretrained='vggface2').eval().to(self.device)

    def extract_embeddings(self, image_path):
        """Return a 128-d OpenFace embedding for the first detected face, or None."""
        try:
            image = cv2.imread(image_path)
            if image is None:
                logger.error(f"Failed to load image: {image_path}")
                return None

            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)

            if len(faces) == 0:
                logger.warning(f"No face detected in image: {image_path}")
                return None

            (x, y, w, h) = faces[0]
            face = image[y:y + h, x:x + w]

            face_blob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96),
                                              (0, 0, 0), swapRB=True, crop=False)
            self.model.setInput(face_blob)
            vec = self.model.forward()

            return vec.flatten().tolist()
        except Exception as e:
            logger.error(f"An error occurred while extracting embeddings: {e}")
            return None

    def extract_embeddings_vgg(self, image):
        """Return a 512-d VGGFace2 embedding for the first detected face, or None."""
        try:
            # With keep_all=True, MTCNN already returns a batched
            # (n_faces, 3, 160, 160) tensor, so no extra unsqueeze is needed
            preprocessed_image = self.mtcnn(image)

            if preprocessed_image is None:
                logger.warning("No face detected in image")
                return None

            # Extract the face embeddings and keep the first face's vector
            embeddings = self.resnet(preprocessed_image).detach().cpu().numpy().tolist()
            if embeddings:
                return embeddings[0]
            return None
        except Exception as e:
            logger.error(f"An error occurred while extracting embeddings: {e}")
            return None
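A hedged usage sketch for FacialProcessing ('face.jpg' is a placeholder path, and 'openface.nn4.small2.v1.t7' must sit in the working directory). Note the two backends are not interchangeable: OpenFace nn4.small2.v1 produces 128-d vectors while InceptionResnetV1 with vggface2 weights produces 512-d ones, so probe and stored embeddings must come from the same method:

from PIL import Image

processor = FacialProcessing()

# 128-d OpenFace embedding from an image file on disk
openface_vec = processor.extract_embeddings('face.jpg')

# 512-d VGGFace2 embedding; facenet-pytorch's MTCNN accepts PIL images directly
vgg_vec = processor.extract_embeddings_vgg(Image.open('face.jpg'))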
services/recommendation_service.py
ADDED
@@ -0,0 +1,97 @@
# recommendation_service.py
import os
import random
from datetime import datetime, timedelta
from typing import List

import joblib
import pandas as pd
from sqlalchemy.orm import Session
from surprise import Dataset, Reader, SVD

from orders.models import Order, Meal
from users.models import User


class MealRecommender:
    def __init__(self, db: Session):
        self.db = db
        self.model_path = 'recommendation_model.joblib'
        self.last_train_path = 'last_train_time.txt'
        self.retrain_interval = timedelta(days=1)
        self.algo = self.load_or_train_model()

    def fetch_data(self):
        orders = self.db.query(Order).all()
        return pd.DataFrame(
            [(order.user_id, order.meal_id, order.quantity) for order in orders],
            columns=['user_id', 'meal_id', 'quantity'])

    def train_model(self):
        data = self.fetch_data()
        if data.empty:
            self.algo = None
            return None

        # Order quantities stand in for explicit 1-5 ratings
        reader = Reader(rating_scale=(1, 5))
        dataset = Dataset.load_from_df(data[['user_id', 'meal_id', 'quantity']], reader)

        trainset = dataset.build_full_trainset()
        algo = SVD()
        algo.fit(trainset)

        joblib.dump(algo, self.model_path)
        self._update_last_train_time()
        return algo

    def load_or_train_model(self):
        try:
            if self._should_retrain():
                return self.train_model()
            return joblib.load(self.model_path)
        except FileNotFoundError:
            return self.train_model()

    def _should_retrain(self):
        if not os.path.exists(self.last_train_path):
            return True
        with open(self.last_train_path, 'r') as f:
            last_train_time = datetime.fromisoformat(f.read().strip())
        return datetime.now() - last_train_time > self.retrain_interval

    def _update_last_train_time(self):
        with open(self.last_train_path, 'w') as f:
            f.write(datetime.now().isoformat())

    def get_recommendations(self, user: User):
        if self._should_retrain():
            self.algo = self.train_model()

        if self.algo is None:
            return self.get_random_recommendations()

        all_meals = self.db.query(Meal).all()
        meal_ids = [meal.id for meal in all_meals]

        # Raw ids passed to predict() must match the type used in the training
        # dataframe (integers here); string ids would be treated as unknown
        # users/items and every estimate would collapse to the global mean.
        predictions = [self.algo.predict(user.id, meal_id) for meal_id in meal_ids]
        sorted_predictions = sorted(predictions, key=lambda x: x.est, reverse=True)
        top_recommendations = self.db.query(Meal).filter(
            Meal.id.in_([pred.iid for pred in sorted_predictions[:20]])).all()

        top_recommendations = self.adjust_for_preferences(user, top_recommendations)

        return top_recommendations[:5]  # Return the top 5 recommendations

    def adjust_for_preferences(self, user: User, recommendations: List[Meal]) -> List[Meal]:
        preferences = user.preferences if user.preferences else []
        preference_scores = {meal.id: 0 for meal in recommendations}

        for meal in recommendations:
            for preferred in preferences:
                if preferred.lower() in meal.name.lower() or preferred.lower() in meal.description.lower():
                    preference_scores[meal.id] += 1

        # Stable sort keeps the SVD ranking among meals with equal preference scores
        return sorted(recommendations, key=lambda meal: preference_scores[meal.id], reverse=True)

    def get_random_recommendations(self):
        all_meals = self.db.query(Meal).all()
        return random.sample(all_meals, min(5, len(all_meals)))
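A usage sketch for MealRecommender, reusing the hypothetical SessionLocal factory from the FaceMatch example above; the user lookup is illustrative:

db = SessionLocal()
user = db.query(User).filter(User.id == 1).first()  # any existing user

recommender = MealRecommender(db)  # loads or trains the SVD model up front
for meal in recommender.get_recommendations(user):
    print(meal.id, meal.name)
db.close()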