# swap_face / app.py
# Dependencies (install from a shell before running -- these are not Python):
#   pip install insightface onnxruntime
#   pip install "gradio<4"  # the `source=` arguments below are Gradio 3.x API
import gradio as gr
import numpy as np
import tensorflow as tf
import cv2
import os
import glob
import matplotlib.pyplot as plt
import insightface
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image
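
# The insightface pieces above are imported but never initialised in this file.
# A minimal sketch of the usual setup (assumptions: the 'buffalo_l' detection
# pack and the community 'inswapper_128.onnx' swapper weights are available
# locally; use ctx_id=-1 for CPU-only), left commented out like the model load
# below:
# face_app = FaceAnalysis(name='buffalo_l')
# face_app.prepare(ctx_id=0, det_size=(640, 640))
# swapper = insightface.model_zoo.get_model('inswapper_128.onnx')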
# Load your trained gender-classification model (left disabled in the original;
# predict_gender below will raise NameError until `model` is defined).
# model = tf.keras.models.load_model('path_to_your_model.h5')
def predict_gender(image):
    # Convert the image to the format expected by the model and preprocess it.
    img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    img = cv2.resize(img, (224, 224))  # example input size
    img = img / 255.0  # normalize to [0, 1]
    img = np.expand_dims(img, axis=0)
    prediction = model.predict(img)
    # Assuming binary classification with a single output neuron.
    return "Male" if prediction[0] < 0.5 else "Female"
def predict(video_in, image_in_video, image_in_img):
    if video_in is None and image_in_video is None and image_in_img is None:
        raise gr.Error("Please upload a video or image.")
    if image_in_video or image_in_img:
        print("image", image_in_video, image_in_img)
        image = image_in_video or image_in_img
        return image
    return video_in
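
# In a full face-swap app, predict() would presumably run the media through
# swap_faces() instead of returning it unchanged, e.g. (hypothetical wiring;
# source_face and output_path are not defined in this file):
# result = swap_faces(cv2.imread(image), source_face)
# cv2.imwrite(output_path, result)
# return output_path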
def toggle(choice):
    if choice == "webcam":
        return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
    else:
        return gr.update(visible=False, value=None), gr.update(visible=True, value=None)
with gr.Blocks() as blocks:
    gr.Markdown("### Video or Image? WebCam or Upload?")
with gr.Tab("Video") as tab:
with gr.Row():
with gr.Column():
video_or_file_opt = gr.Radio(["webcam", "upload"], value="webcam",
label="How would you like to upload your video?")
video_in = gr.Video(source="webcam", include_audio=False)
video_or_file_opt.change(fn=lambda s: gr.update(source=s, value=None), inputs=video_or_file_opt,
outputs=video_in, queue=False, show_progress=False)
with gr.Column():
video_out = gr.Video()
run_btn = gr.Button("Run")
run_btn.click(fn=predict, inputs=[video_in], outputs=[video_out])
gr.Examples(fn=predict, examples=[], inputs=[
video_in], outputs=[video_out])
with gr.Tab("Image"):
with gr.Row():
with gr.Column():
image_or_file_opt = gr.Radio(["webcam", "file"], value="webcam",
label="How would you like to upload your image?")
image_in_video = gr.Image(source="webcam", type="filepath")
image_in_img = gr.Image(
source="upload", visible=False, type="filepath")
image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
outputs=[image_in_video, image_in_img], queue=False, show_progress=False)
with gr.Column():
image_out = gr.Image()
run_btn = gr.Button("Run")
run_btn.click(fn=predict, inputs=[
image_in_img, image_in_video], outputs=[image_out])
gr.Examples(fn=predict, examples=[], inputs=[
image_in_img, image_in_video], outputs=[image_out])
blocks.queue()
blocks.launch()