Spaces: Runtime error
import tensorflow as tf
from tensorflow.keras.models import load_model, Model

# Function to preprocess the image
def preprocess_image(filename, target_shape=(160, 160)):
    image_string = tf.io.read_file(filename)
    image = tf.image.decode_jpeg(image_string, channels=3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize(image, target_shape)
    return image

# Load the base FaceNet model
facenet_model = load_model('facenet_keras.h5', compile=False)

# Create the embedding model using the FaceNet model
embedding = Model(inputs=facenet_model.input,
                  outputs=facenet_model.layers[-2].output,
                  name="Embedding")

# Load the weights for your siamese or modified FaceNet model
embedding.load_weights('facenet_siamese_embedding.h5')

# Set all layers to non-trainable
for layer in embedding.layers:
    layer.trainable = False

# Function to generate an embedding for a single image
def generate_embedding(image_path, model):
    preprocessed_image = preprocess_image(image_path)
    preprocessed_image = tf.expand_dims(preprocessed_image, axis=0)  # Add batch dimension
    embedding_vector = model(preprocessed_image)
    return embedding_vector

# Example usage
image_path = 'iman.jpg'  # Update with your image's path
image_embedding = generate_embedding(image_path, embedding)
print("Generated Embedding:", image_embedding.numpy())