import cv2
import gradio as gr
import numpy as np

from z_app_factory import get_app
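
# Assumed contract of z_app_factory.get_app, inferred from how its result is used below:
# it returns a dict like {"code": 200, "data": {"bbox": [[x, y, w, h], ...], "score": [0.99, ...]}},
# where "code" is 200 on success and 401-405 flag the input errors handled in inference().
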
thickness = 3
lineType = 8
font = cv2.FONT_HERSHEY_SIMPLEX


def inference(image):
    # Gradio passes the image in as an RGB array; swap channels so OpenCV works in its native BGR order.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    dic_res = get_app(image)
    code = dic_res["code"]
    data = dic_res["data"]
    msg = "Unknown error"
    if code == 401:
        msg = "Not an RGB three-channel image"
    elif code == 402:
        msg = "Image smaller than 32 x 32 pixels"
    elif code == 403:
        msg = "Image larger than 4096 x 4096 pixels"
    elif code == 404:
        msg = "File larger than 5 MB"
    elif code == 405:
        msg = "System error, please contact the maintainer for troubleshooting"
    # Render status messages on a blank canvas when no annotated image can be returned.
    img_out = np.zeros((500, 600, 3), dtype=np.uint8)
    if code != 200:
        cv2.putText(img_out, msg, (20, 200), font, 1, (0, 255, 0), 2)
        return img_out
    if not data:
        cv2.putText(img_out, "no face detected", (20, 200), font, 1, (0, 255, 0), 2)
        return img_out
    # Draw one rectangle and confidence score per detected face.
    lst_bbox = data['bbox']
    lst_score = data['score']
    for idx, image_bbox in enumerate(lst_bbox):
        score = lst_score[idx]
        # Bounding boxes are (x, y, width, height); brighter green means higher confidence.
        cv2.rectangle(
            image,
            (image_bbox[0], image_bbox[1]),
            (image_bbox[0] + image_bbox[2], image_bbox[1] + image_bbox[3]),
            (0, int(255 * score), 0), 2)
        cv2.putText(
            image,
            str(score)[:4],
            (image_bbox[0], image_bbox[1] - 5),
            cv2.FONT_HERSHEY_SIMPLEX, 1.5 * image.shape[0] / 1024, (0, int(255 * score), 0), 2)
    # Convert back to RGB for display in the Gradio output component.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return image
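
# Minimal local smoke test (a sketch; assumes the example images shipped with this Space).
# Uncomment to exercise inference() without starting the Gradio UI:
#
#   bgr = cv2.imread("imgs/11.jpg")
#   rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)   # inference() expects RGB input, as Gradio provides
#   out = inference(rgb)                         # returns an RGB array with boxes and scores drawn
#   cv2.imwrite("out.jpg", cv2.cvtColor(out, cv2.COLOR_RGB2BGR))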
title = "Detect Living Face"
description = "Demo for Detect Living Face. To use it, simply upload your image or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://www.yuque.com/itmorn/ability/detect_living_face' target='_blank'>Project Documents</a> | <a href='https://www.bilibili.com/video/BV1CW4y1n7Kg' target='_blank'>Video Demo</a></p>"

gr.Interface(
    inference,
    [gr.inputs.Image(label="Input")],
    gr.outputs.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[
        ["imgs/11.jpg"],
        ["imgs/13.jpg"],
    ]).launch(debug=True)