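"""Gradio Space demo for PIFu (Pixel-Aligned Implicit Function).

Given a single photo of a person, the app removes the background, crops the
image to the framing PIFu expects, runs the pre-trained "upright standing"
checkpoints, and returns a textured 3D mesh (.glb).
"""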
from huggingface_hub import hf_hub_download
import gradio as gr
import subprocess
import os
from PIL import Image
from remove_bg import RemoveBackground

net_C = hf_hub_download("radames/PIFu-upright-standing", filename="net_C")
net_G = hf_hub_download("radames/PIFu-upright-standing", filename="net_G")
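# net_G is PIFu's geometry (surface reconstruction) network and net_C its
# color network; hf_hub_download returns the locally cached checkpoint paths.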

remove_bg = RemoveBackground()
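# Background segmentation helper defined in remove_bg.py; PIFu expects an
# input image with the subject isolated from the background.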

env = {
    **os.environ,
    "CHECKPOINTS_NETG_PATH": net_G,
    "CHECKPOINTS_NETC_PATH": net_C,
    "RESULTS_PATH": './results',
}
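# These environment variables (checkpoint and output paths) are assumed to be
# read by ./scripts/test.sh inside the PIFu repo.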


def process(img_path):
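    """Reconstruct a textured 3D mesh from a single image.

    Pipeline: background removal -> crop/align to PIFu's input format ->
    PIFu inference via ./scripts/test.sh -> path of the resulting .glb mesh.
    """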
    base = os.path.basename(img_path)
    img_name = os.path.splitext(base)[0]
    print("image name", img_name)
    img = Image.open(img_path)
    # remove background
    print("remove background")
    foreground = Image.fromarray(remove_bg.inference(img), 'RGBA')
    foreground.save(f"./PIFu/inputs/{img_name}.png")
    print("align mask with input training image")
    subprocess.Popen(["python", "./apps/crop_img.py", "--input_image",
                      f'./inputs/{img_name}.png', "--out_path", "./inputs"], cwd="PIFu").communicate()

    print("generate 3D model")
    subprocess.Popen("./scripts/test.sh", env={
                     **env,
                     "INPUT_IMAGE_PATH": f'./inputs/{img_name}.png',
                     "VOL_RES": "256"},
                     cwd="PIFu").communicate()

    print("inference")
    return f'./PIFu/results/spaces_demo/result_{img_name}.glb'


examples = [["./examples/" + img] for img in sorted(os.listdir("./examples/"))]
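# One Gradio example per image shipped in ./examples.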
description = '''
# PIFu Clothed Human Digitization
#### PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization
<base target="_blank">

This is a demo of the <a href="https://github.com/shunsukesaito/PIFu" target="_blank">PIFu model</a>.
The pre-trained model has the following warning:
> Warning: The released model is trained with mostly upright standing scans with weak perspective projection and a pitch angle of 0 degrees. Reconstruction quality may degrade for images that deviate strongly from the training data.

**Inference takes about 180 seconds for a new image.**

<details> 
<summary>More</summary>

#### Image Credits

* Julien and Clem
* [StyleGAN Humans](https://huggingface.co/spaces/hysts/StyleGAN-Human)
* [Renderpeople: Dennis](https://renderpeople.com)


#### More
* https://phorhum.github.io/
* https://github.com/yuliangxiu/icon
* https://shunsukesaito.github.io/PIFuHD/

</details>
'''

iface = gr.Interface(
    fn=process,
    description=description,
    inputs=gr.Image(type="filepath", label="Input"),
    outputs=gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0]),
    examples=examples,
    allow_flagging="never",
    cache_examples=True
)
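# Model3D renders the returned .glb in the browser; cache_examples=True
# pre-computes the example reconstructions so they load instantly.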

if __name__ == "__main__":
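    # enable_queue keeps the long-running reconstruction (~180 s per image)
    # from hitting Gradio's default request timeout.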
    iface.launch(debug=True, enable_queue=True)