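"""Gradio demo for face registration and recognition.

Two panels built with gr.Blocks: capture a face from the webcam and register
it under a name, then recognize registered people in a newly captured image.
Embeddings live in the in-memory ``ind2person`` dict and are managed through
the helpers in utils.face_rec.
"""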
import os

import cv2
import gradio as gr
import numpy as np
from PIL import Image

from utils.face_rec import input_an_image, update_ind2person, image_rec

# in-memory gallery of registered people; each entry keeps its face embedding under "emb"
ind2person = dict()


def video_identity(video):
    # pass-through used only by the commented-out gr.Interface video demo at the bottom
    return video


def str_intercept(img_path):
    """Return (index of the last '.', index of the last '/') in img_path.

    Kept for the commented-out rename logic in face_entry below.
    """
    img_path_ = img_path[::-1]
    point_index = 0  # index of the extension dot
    slash_index = 0  # index of the last path separator

    flag_pi = 0
    flag_si = 0

    for i in range(len(img_path_)):
        if (img_path_[i] == "." and flag_pi == 0):
            point_index = i
            flag_pi = 1

        if (img_path_[i] == "/" and flag_si == 0):
            slash_index = i
            flag_si = 1

    point_index = len(img_path) - 1 - point_index
    slash_index = len(img_path) - 1 - slash_index

    return point_index, slash_index
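
# Note: when img_path contains both "." and "/", the same indices are simply
#   point_index, slash_index = img_path.rfind("."), img_path.rfind("/")
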
def face_entry(img_path, name_text):
    """Register the captured face image under the given person name."""
    if not img_path or not name_text:
        # a single Textbox is wired to this output, so return one value
        return None
    #point_index, slash_index = str_intercept(img_path)
    #img_renamePath = f"{img_path[:slash_index+1]}{name_text}{img_path[point_index:]}"
    #os.rename(img_path, img_renamePath)
    #img_ = Image.open(img_renamePath)
    img_ = Image.open(img_path)

    # compute the embedding and add it to the in-memory gallery
    emb = input_an_image(img_, name_text)
    update_ind2person(ind2person, emb, name_text)
    return f"face image for {name_text} uploaded"
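
# Example (hypothetical "faces/<name>.jpg" layout): known faces could be
# pre-registered at startup with the same helpers face_entry uses:
#   for fname in os.listdir("faces"):
#       person, _ = os.path.splitext(fname)
#       emb = input_an_image(Image.open(os.path.join("faces", fname)), person)
#       update_ind2person(ind2person, emb, person)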

def face_rec_img(image):
    """Annotate recognized faces in a PIL image using the registered gallery."""
    known_face_encodings = [v["emb"] for v in ind2person.values()]
    # image_rec expects an OpenCV BGR array, so convert from PIL RGB and back
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    image = image_rec(image, known_face_encodings=known_face_encodings, _ind2person=ind2person)
    return Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
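
# Example (hypothetical file names): annotate a saved photo once at least one
# person has been registered:
#   annotated = face_rec_img(Image.open("group_photo.jpg"))
#   annotated.save("group_photo_annotated.jpg")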

def change_input_image(choice):
    # used by the (currently commented-out) radio selector in main() to switch
    # the face-entry image between webcam capture and file upload
    if choice == "camera":
        return gr.Image(image_mode="RGB", source="webcam", type="filepath", label="upload face image", visible=True)
    elif choice == "upload":
        return gr.Image(image_mode="RGB", source="upload", type="filepath", label="upload face image", visible=True)
    else:
        return gr.Image.update(visible=False)
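
# Example: to re-enable the selector, uncomment the radio lines in main() and wire them up:
#   radio = gr.Radio(["camera", "upload"], label="capture face image in your camera or upload face image")
#   radio.change(fn=change_input_image, inputs=radio, outputs=input_img)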

def main():
    """Build the Blocks UI: a face-entry panel and a face-recognition panel."""
    with gr.Blocks(css='style.css') as demo:
        with gr.Row():
            gr.Markdown("Capture Face", elem_id="md1")
        with gr.Row():
            # radio = gr.Radio(["camera","upload"], label="capture face image in your camera or upload face image")
            with gr.Column(scale=2):
                with gr.Row():
                    input_img = gr.Image(image_mode="RGB", source="webcam", type="filepath", label="capture face image")
                    # input_img = gr.Image(image_mode="RGB",interactive=True)
                    # radio.change(fn=change_input_image, inputs=radio, outputs=input_img)
            with gr.Column(scale=1):
                with gr.Row():
                    input_name = gr.Textbox(label="input person name")
                with gr.Row():
                    btn = gr.Button(value="upload face image")
                with gr.Row():
                    output_name = gr.Textbox(label="echo")

        with gr.Row():
            gr.Markdown("Face Recognition", elem_id="md1")
        with gr.Row():
            
            with gr.Column():
                with gr.Row():
                    img2rec = gr.Image(image_mode="RGB", source="webcam", type="pil", label="capture face image to recognize")
                with gr.Row():
                    btn_img_rec = gr.Button(value="recognize faces")
            with gr.Column():
                # with gr.Row():
                #     input_name = gr.Textbox(label="input person name")
                with gr.Row():
                    output_rec = gr.Image(image_mode="RGB", type="pil", label="recognition result")
        
        # with gr.Row():
        #     gr.Markdown("Video Face Recognition")
        # with gr.Row():
            
        #     with gr.Column():
        #         with gr.Row():
        #             img2rec = gr.Image(image_mode="RGB", source="webcam", type="pil", label="upload face image")
        #         with gr.Row():
        #             btn_img_rec = gr.Button(value="upload face image")
        #     with gr.Column():
        #         # with gr.Row():
        #         #     input_name = gr.Textbox(label="input person name")
        #         with gr.Row():
        #             output_rec = gr.Image(image_mode="RGB", source="upload", type="pil", label="rec image")

        btn.click(fn=face_entry, inputs=[input_img, input_name], outputs=[output_name])
        btn_img_rec.click(fn=face_rec_img, inputs=[img2rec], outputs=[output_rec])
    # video=gr.Video(source='webcam')
    # demo = gr.Interface(video_identity,video,"video")

    return demo


if __name__ == "__main__":
    demo = main()
    # demo.launch(share=True)
    demo.launch()
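    # demo.launch(server_name="0.0.0.0", server_port=7860)  # optional: expose on the local network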