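# Gradio demo for "Detect Living Face": upload an image, run the detector from
# z_app_factory, and display the annotated result (bounding boxes and scores).
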
import cv2
import gradio as gr
import numpy as np

from z_app_factory import get_app
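
# get_app is assumed (from how its result is used below) to return a dict of
# the form {"code": <int>, "data": {"bbox": [[x, y, w, h], ...],
# "score": [float, ...]}}, where any code other than 200 signals an
# input-validation or server error.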

# Font used for drawing status text and detection scores.
font = cv2.FONT_HERSHEY_SIMPLEX

def inference(image):
    # Gradio supplies an RGB array; convert to BGR for OpenCV processing.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    dic_res = get_app(image)
    code = dic_res["code"]
    data = dic_res["data"]

    # Map detector error codes to human-readable messages.
    msg = ""
    if code == 401:
        msg = "Not an RGB three-channel image"
    elif code == 402:
        msg = "Image smaller than 32 x 32 pixels"
    elif code == 403:
        msg = "Image larger than 4096 x 4096 pixels"
    elif code == 404:
        msg = "File larger than 5 MB"
    elif code == 405:
        msg = "System error, please contact the server administrator"

    # Blank canvas used to report errors or an empty detection result.
    img_out = np.zeros((500, 600, 3), dtype=np.uint8)
    if code != 200:
        cv2.putText(img_out, msg, (20, 200), font, 1, (0, 255, 0), 2)
        return img_out

    if not data:
        cv2.putText(img_out, "no face detected", (20, 200), font, 1, (0, 255, 0), 2)
        return img_out

    # Draw each detected face: the bounding box and its score, coloured green
    # in proportion to the score. Each bbox is (x, y, width, height).
    lst_bbox = data['bbox']
    lst_score = data['score']
    for idx, image_bbox in enumerate(lst_bbox):
        score = lst_score[idx]
        cv2.rectangle(
            image,
            (image_bbox[0], image_bbox[1]),
            (image_bbox[0] + image_bbox[2], image_bbox[1] + image_bbox[3]),
            (0, int(255 * score), 0), 2)

        cv2.putText(
            image,
            str(score)[:4],
            (image_bbox[0], image_bbox[1] - 5),
            font, 1.5 * image.shape[0] / 1024, (0, int(255 * score), 0), 2)

    # Convert back to RGB for display in the Gradio output component.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return image
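
# Optional local sanity check (not part of the Gradio app). The example image
# path below is taken from the examples list; uncomment to run offline:
#   img = cv2.cvtColor(cv2.imread("imgs/11.jpg"), cv2.COLOR_BGR2RGB)
#   cv2.imwrite("out_11.jpg", cv2.cvtColor(inference(img), cv2.COLOR_RGB2BGR))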



title = "Detect Living Face"
description = "Demo for Detect Living Face. To use it, upload your own image or click one of the examples to load it. Read more at the links below."
article = "<p style='text-align: center'><a href='https://www.yuque.com/itmorn/ability/detect_living_face' target='_blank'>Project Documents</a> | <a href='https://www.bilibili.com/video/BV1CW4y1n7Kg' target='_blank'>Video Demo</a></p>"

gr.Interface(
    inference,
    [gr.inputs.Image(label="Input")],
    # inference returns a NumPy array, so declare the output type accordingly.
    gr.outputs.Image(type="numpy", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[
        ["imgs/11.jpg"],
        ["imgs/13.jpg"],
    ]).launch(debug=True)