import glob
import os
import subprocess

import gradio as gr
import numpy as np
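

# On Hugging Face Spaces, install the heavy CUDA and geometry dependencies
# at runtime from prebuilt wheels, before any module that needs them is
# imported.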
if os.getenv('SYSTEM') == 'spaces':
    subprocess.run('pip install pyembree'.split())
    subprocess.run(
        'pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html'.split())
    subprocess.run(
        'pip install https://download.is.tue.mpg.de/icon/HF/kaolin-0.11.0-cp38-cp38-linux_x86_64.whl'.split())
    subprocess.run(
        'pip install https://download.is.tue.mpg.de/icon/HF/pytorch3d-0.7.0-cp38-cp38-linux_x86_64.whl'.split())
    subprocess.run(
        'pip install git+https://github.com/YuliangXiu/neural_voxelization_layer.git'.split())
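
# Deferred import: apps.infer depends on the packages installed above
# (torch, kaolin, pytorch3d), so it can only be imported after that setup.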
from apps.infer import generate_model

description = '''
# ICON Clothed Human Digitization
### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022)

<table>
<th>
<ul>
<li><strong>Homepage</strong> <a href="http://icon.is.tue.mpg.de">icon.is.tue.mpg.de</a></li>
<li><strong>Code</strong> <a href="https://github.com/YuliangXiu/ICON">YuliangXiu/ICON</a></li>
<li><strong>Paper</strong> <a href="https://arxiv.org/abs/2112.09127">arXiv</a>, <a href="https://readpaper.com/paper/4569785684533977089">ReadPaper</a></li>
<li><strong>Chatroom</strong> <a href="https://discord.gg/Vqa7KBGRyk">Discord</a></li>
<li><strong>Colab Notebook</strong> <a href="https://colab.research.google.com/drive/1-AWeWhPvCTBX0KfMtgtMk10uPU05ihoA?usp=sharing">Google Colab</a></li>
</ul>
<a href="https://twitter.com/yuliangxiu"><img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/yuliangxiu?style=social"></a>
<iframe src="https://ghbtns.com/github-btn.html?user=yuliangxiu&repo=ICON&type=star&count=true&v=2&size=small" frameborder="0" scrolling="0" width="100" height="20"></iframe>
<a href="https://youtu.be/hZd6AYin2DE"><img alt="YouTube Video Views" src="https://img.shields.io/youtube/views/hZd6AYin2DE?style=social"></a>
</th>
<th>
<iframe width="560" height="315" src="https://www.youtube.com/embed/hZd6AYin2DE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</th>
</table>

<h4> Reconstruction + refinement + video rendering take about 200 seconds per image. <span style="color:red"> If an error occurs, click "Submit Image" again.</span></h4>

<details>

<summary>More</summary>

#### Citation
```
@inproceedings{xiu2022icon,
  title     = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals},
  author    = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  month     = {June},
  year      = {2022},
  pages     = {13296-13306}
}
```

#### Acknowledgments

- [StyleGAN-Human, ECCV 2022](https://stylegan-human.github.io/)
- [nagolinc/styleGanHuman_and_PIFu](https://huggingface.co/spaces/nagolinc/styleGanHuman_and_PIFu)
- [radames/PIFu-Clothed-Human-Digitization](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization)

#### Image Credits

* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox)

#### Related works

* [ICON @ MPI](https://icon.is.tue.mpg.de/)
* [MonoPort @ USC](https://xiuyuliang.cn/monoport)
* [Phorhum @ Google](https://phorhum.github.io/)
* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/)
* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html)

</details>
'''


def generate_image(seed, psi):
    """Sample a full-body portrait from the StyleGAN-Human Space."""
    iface = gr.Interface.load("spaces/hysts/StyleGAN-Human")
    img = iface(seed, psi)
    return img


model_types = ['ICON', 'PIFu', 'PaMIR']
examples_names = glob.glob('examples/*.png')
# Randomly assign a reconstruction method to each example image,
# weighted towards ICON.
examples_types = np.random.choice(
    model_types, len(examples_names), p=[0.6, 0.2, 0.2])

examples = [list(item) for item in zip(examples_names, examples_types)]
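
# Layout: generation/reconstruction controls and examples in the left
# column, reconstruction results (video and meshes) in the right column.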
with gr.Blocks() as demo:
    gr.Markdown(description)

    out_lst = []

    with gr.Row():
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    seed = gr.Slider(
                        0, 1000, step=1, value=0,
                        label='Seed (For Image Generation)')
                    psi = gr.Slider(
                        0, 2, step=0.05, value=0.7,
                        label='Truncation psi (For Image Generation)')
                    radio_choice = gr.Radio(
                        model_types, label='Method (For Reconstruction)',
                        value='ICON')
                    inp = gr.Image(type="filepath", label="Input Image")
                    with gr.Row():
                        btn_sample = gr.Button("Generate Image")
                        btn_submit = gr.Button("Submit Image")

            gr.Examples(examples=examples,
                        inputs=[inp, radio_choice],
                        cache_examples=False,
                        fn=generate_model,
                        outputs=out_lst)

            out_vid = gr.Video(
                label="Image + Normal + SMPL Body + Clothed Human")
            out_vid_download = gr.File(
                label="Download video (feel free to share it on Twitter with #ICON)")

        with gr.Column():
            overlap_inp = gr.Image(
                type="filepath", label="Image Normal Overlap")
            out_final = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human")
            out_final_download = gr.File(
                label="Download clothed human mesh")
            out_smpl = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL body")
            out_smpl_download = gr.File(label="Download SMPL body mesh")
            out_smpl_npy_download = gr.File(label="Download SMPL params")

    # The order of out_lst must match the tuple returned by generate_model.
    out_lst = [out_smpl, out_smpl_download, out_smpl_npy_download,
               out_final, out_final_download, out_vid, out_vid_download,
               overlap_inp]
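
    # Wire up the buttons: "Generate Image" samples a new input photo from
    # StyleGAN-Human; "Submit Image" runs the reconstruction pipeline.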
    btn_submit.click(fn=generate_model,
                     inputs=[inp, radio_choice],
                     outputs=out_lst)
    btn_sample.click(fn=generate_image, inputs=[seed, psi], outputs=inp)


if __name__ == "__main__":
    demo.launch(debug=True, enable_queue=True)