Upload 104 files
This view is limited to 50 files because the commit contains too many changes; the raw diff shows the full set.
- .gitattributes +4 -0
- decalib/DECA/.gitignore +49 -0
- decalib/DECA/Detailed_Expression_Capture_and_Animation.ipynb +208 -0
- decalib/DECA/Doc/images/DECA_evaluation_github.png +0 -0
- decalib/DECA/Doc/images/DECA_performance.png +0 -0
- decalib/DECA/Doc/images/IMG_0392_inputs_vis.jpg +0 -0
- decalib/DECA/Doc/images/id04657-PPHljWCZ53c-000565_inputs_inputs_vis.jpg +0 -0
- decalib/DECA/Doc/images/soubhik.gif +3 -0
- decalib/DECA/Dockerfile +25 -0
- decalib/DECA/LICENSE +88 -0
- decalib/DECA/README.md +169 -0
- decalib/DECA/TestSamples/AFLW2000/image00181.jpg +0 -0
- decalib/DECA/TestSamples/AFLW2000/image00202.jpg +0 -0
- decalib/DECA/TestSamples/AFLW2000/image00302.jpg +0 -0
- decalib/DECA/TestSamples/AFLW2000/image00408.jpg +0 -0
- decalib/DECA/TestSamples/AFLW2000/image00480.jpg +0 -0
- decalib/DECA/TestSamples/AFLW2000/image02024.jpg +0 -0
- decalib/DECA/TestSamples/AFLW2000/image02202.jpg +0 -0
- decalib/DECA/TestSamples/AFLW2000/image02246.jpg +0 -0
- decalib/DECA/TestSamples/AFLW2000/image04085.jpg +0 -0
- decalib/DECA/TestSamples/examples/IMG_0392_inputs.jpg +0 -0
- decalib/DECA/TestSamples/examples/alfw1.png +0 -0
- decalib/DECA/TestSamples/examples/alfw2.png +0 -0
- decalib/DECA/TestSamples/examples/id04657-PPHljWCZ53c-000565_inputs_inputs.jpg +0 -0
- decalib/DECA/TestSamples/examples/id06692-Hlahj5abifM-002721_inputs_inputs.jpg +0 -0
- decalib/DECA/TestSamples/examples/id08392-DGGTJx470Ag-005451_inputs_inputs.jpg +0 -0
- decalib/DECA/TestSamples/examples/id08456-SEvpX9LhEP4-000957_inputs_inputs.jpg +0 -0
- decalib/DECA/TestSamples/examples/image02673.png +0 -0
- decalib/DECA/TestSamples/examples/image03786.png +0 -0
- decalib/DECA/TestSamples/exp/0.jpg +0 -0
- decalib/DECA/TestSamples/exp/1.jpg +0 -0
- decalib/DECA/TestSamples/exp/2.jpg +0 -0
- decalib/DECA/TestSamples/exp/3.jpg +0 -0
- decalib/DECA/TestSamples/exp/4.jpg +0 -0
- decalib/DECA/TestSamples/exp/5.jpg +0 -0
- decalib/DECA/TestSamples/exp/6.jpg +0 -0
- decalib/DECA/TestSamples/exp/7.jpg +0 -0
- decalib/DECA/TestSamples/exp/8.jpg +0 -0
- decalib/DECA/TestSamples/teaser/0.jpg +3 -0
- decalib/DECA/TestSamples/teaser/1.jpg +3 -0
- decalib/DECA/TestSamples/teaser/2.png +0 -0
- decalib/DECA/TestSamples/teaser/results/teaser.gif +3 -0
- decalib/DECA/configs/release_version/deca_coarse.yml +24 -0
- decalib/DECA/configs/release_version/deca_detail.yml +29 -0
- decalib/DECA/configs/release_version/deca_pretrain.yml +33 -0
- decalib/DECA/data/FLAME2020/Readme.pdf +0 -0
- decalib/DECA/data/fixed_displacement_256.npy +3 -0
- decalib/DECA/data/head_template.obj +0 -0
- decalib/DECA/data/landmark_embedding.npy +3 -0
- decalib/DECA/data/mean_texture.jpg +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+decalib/DECA/Doc/images/soubhik.gif filter=lfs diff=lfs merge=lfs -text
+decalib/DECA/TestSamples/teaser/0.jpg filter=lfs diff=lfs merge=lfs -text
+decalib/DECA/TestSamples/teaser/1.jpg filter=lfs diff=lfs merge=lfs -text
+decalib/DECA/TestSamples/teaser/results/teaser.gif filter=lfs diff=lfs merge=lfs -text
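The four added lines are exactly the rule that `git lfs track <pattern>` appends to .gitattributes. As a small illustrative sketch (the helper below is ours, not part of the commit), the same rule can be written from Python:

```python
from pathlib import Path

def lfs_track(pattern: str, gitattributes: Path = Path(".gitattributes")) -> None:
    """Append a Git LFS tracking rule, mirroring what `git lfs track <pattern>` writes."""
    rule = f"{pattern} filter=lfs diff=lfs merge=lfs -text\n"
    existing = gitattributes.read_text() if gitattributes.exists() else ""
    if rule not in existing:  # avoid duplicate rules on repeated runs
        with gitattributes.open("a") as f:
            f.write(rule)

# Track one of the files added in this commit
lfs_track("decalib/DECA/Doc/images/soubhik.gif")
```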
decalib/DECA/.gitignore
ADDED
@@ -0,0 +1,49 @@
+# Compiled source #
+###################
+*.o
+*.so
+
+# Packages #
+############
+# it's better to unpack these files and commit the raw source
+# git has its own built in compression methods
+*.7z
+*.dmg
+*.gz
+*.iso
+*.jar
+*.rar
+*.tar
+*.zip
+
+# OS generated files #
+######################
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+.vscode
+
+# 3D data #
+############
+*.mat
+*.pkl
+*.obj
+*.dat
+*.npz
+
+# python file #
+############
+*.pyc
+__pycache__
+
+## deca data
+*results*
+# *_vis.jpg
+## internal use
+cluster_scripts
+internal
+*TestSamples*
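To sanity-check which paths these rules exclude, the third-party pathspec package implements gitignore ("gitwildmatch") matching; a minimal sketch, assuming `pip install pathspec`:

```python
import pathspec

# A representative subset of the rules from the .gitignore above
gitignore_lines = ["*.pyc", "__pycache__", "*results*", "*TestSamples*"]
spec = pathspec.PathSpec.from_lines("gitwildmatch", gitignore_lines)

print(spec.match_file("decalib/utils/renderer.pyc"))           # True: *.pyc
print(spec.match_file("TestSamples/AFLW2000/image00181.jpg"))  # True: *TestSamples*
print(spec.match_file("decalib/deca.py"))                      # False: not ignored
```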
decalib/DECA/Detailed_Expression_Capture_and_Animation.ipynb
ADDED
@@ -0,0 +1,208 @@
+{
+  "nbformat": 4,
+  "nbformat_minor": 0,
+  "metadata": {
+    "colab": {
+      "name": "Copy of Detailed Expression Capture and Animation.ipynb",
+      "private_outputs": true,
+      "provenance": [],
+      "collapsed_sections": [],
+      "toc_visible": true,
+      "include_colab_link": true
+    },
+    "kernelspec": {
+      "name": "python3",
+      "display_name": "Python 3"
+    },
+    "language_info": {
+      "name": "python"
+    },
+    "accelerator": "GPU"
+  },
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "view-in-github",
+        "colab_type": "text"
+      },
+      "source": [
+        "<a href=\"https://colab.research.google.com/github/YadiraF/DECA/blob/master/Detailed_Expression_Capture_and_Animation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "<center>\n",
+        "\n",
+        "# DECA: Detailed Expression Capture and Animation\n",
+        "\n",
+        "Code in [](https://github.com/YadiraF/DECA)\n",
+        "\n",
+        "Page at [](https://deca.is.tue.mpg.de/)\n",
+        "\n",
+        "Made by [](https://twitter.com/yaofeng1995)\n",
+        "\n",
+        "\n",
+        "\n",
+        "</center>\n",
+        "\n",
+        "Thanks [mhoangvslev](https://github.com/mhoangvslev) for contributing to this Colab document. "
+      ],
+      "metadata": {
+        "id": "LKXziS2IzAg2"
+      }
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "# Setup"
+      ],
+      "metadata": {
+        "id": "j0hyU8s5f_SB"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "%tensorflow_version 1.x"
+      ],
+      "metadata": {
+        "id": "x8Gm4LXA3Fay"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "bB5Ii6bkBYWc"
+      },
+      "outputs": [],
+      "source": [
+        "#@title Setup dependencies\n",
+        "\n",
+        "%cd /content/\n",
+        "!git clone https://github.com/yadiraf/DECA\n",
+        "\n",
+        "%cd DECA/\n",
+        "!apt -q install -y zip unzip ffmpeg libsm6 libxext6\n",
+        "#!pip install -r requirements.txt\n",
+        "!pip install 'torch==1.6.0'\n",
+        "!pip install 'torchvision==0.7.0'\n",
+        "!pip install -q 'pytorch3d==0.2.5'\n",
+        "!pip install -q numpy scipy chumpy scikit-image opencv-python PyYAML face-alignment yacs kornia ninja fvcore\n",
+        "!pip install -q lucid>=0.2.3 gdown matplotlib\n",
+        "#!pip install --upgrade ipykernel"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "#@title Download models\n",
+        "#@markdown By executing this cell, you agree to the [LICENSE](https://flame.is.tue.mpg.de/modellicense.html) provided by Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V\n",
+        "\n",
+        "print(\"Downloading FLAME2020 model...\")\n",
+        "!gdown --id 18eHkbq2W3aJJVUNTM3QwFK0dPaeNoRAz -O FLAME2020.zip\n",
+        "!unzip -o FLAME2020.zip -d data/\n",
+        "\n",
+        "print(\"Downloading deca_model...\")\n",
+        "!gdown --id 1rp8kdyLPvErw2dTmqtjISRVvQLj6Yzje -O data/deca_model.tar"
+      ],
+      "metadata": {
+        "id": "ZmSRqqrvCIwx"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "source": [
+        "# Face reconstruction"
+      ],
+      "metadata": {
+        "id": "TuLFayzVf6h-"
+      }
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "#@title Run paper demo\n",
+        "print(\"Setting up...\")\n",
+        "!pip install -q kornia==0.4.0 yacs==0.1.8 face_alignment ninja fvcore\n",
+        "\n",
+        "print(\"Check for NVIDIA Driver...\")\n",
+        "!nvidia-smi\n",
+        "\n",
+        "print(\"Running experiments...\")\n",
+        "import os\n",
+        "input_folder = \"TestSamples/AFLW2000\" #@param {type:\"string\"}\n",
+        "output_folder = os.path.join(input_folder, \"results\")\n",
+        "!python demos/demo_reconstruct.py -i $input_folder -s $output_folder --saveDepth True --saveObj True\n"
+      ],
+      "metadata": {
+        "id": "iiFP_JPZHjVf"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "source": [
+        "#@title Use your own image\n",
+        "#@markdown Upload your images to `upload` folder under `DECA`\n",
+        "print(\"Setting up...\")\n",
+        "!pip install -q kornia yacs face_alignment ninja fvcore\n",
+        "\n",
+        "print(\"Check for NVIDIA Driver...\")\n",
+        "!nvidia-smi\n",
+        "\n",
+        "print(\"Running experiments...\")\n",
+        "import os\n",
+        "input_folder = \"/content/\" #@param {type:\"string\"}\n",
+        "output_folder = os.path.join(input_folder, \"results\")\n",
+        "!python demos/demo_reconstruct.py -i $input_folder -s $output_folder --saveDepth True --saveObj True\n",
+        "\n",
+        "## show results \n",
+        "print('visualize one example below')\n",
+        "import matplotlib.pyplot as plt\n",
+        "from glob import glob\n",
+        "from PIL import Image\n",
+        "vispath_list = glob(output_folder+'/*_size.jpg')\n",
+        "for vispath in vispath_list:\n",
+        "    image = Image.open(vispath)\n",
+        "    plt.figure(figsize=(20, 20))\n",
+        "    plt.imshow(image)\n",
+        "    plt.axis(\"off\");\n",
+        "    break\n",
+        "print(f'Please check all results in {output_folder}')\n"
+      ],
+      "metadata": {
+        "id": "Ny9lRkxefRPz"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "zuBCgeH08tdn"
+      },
+      "source": [
+        "#@title Download the result\n",
+        "import os\n",
+        "from google.colab import files\n",
+        "\n",
+        "print(next(os.walk(output_folder)))\n",
+        "folders = [ os.path.join(output_folder, f) for f in next(os.walk(os.path.join(input_folder, 'results')))[1] ]\n",
+        "\n",
+        "print(f'Download results...')\n",
+        "os.system(f'zip -r DECA_results.zip {\" \".join(folders)}')\n",
+        "files.download(\"DECA_results.zip\")"
+      ],
+      "execution_count": null,
+      "outputs": []
+    }
+  ]
+}
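The demo script invoked in the cells above can also be driven directly from Python. The following sketch is based on how demos/demo_reconstruct.py uses the decalib package; the class and function signatures are assumptions that may differ between versions of the repo, so treat it as an outline rather than the notebook's method:

```python
import torch

# Hypothetical direct use of the decalib API, mirroring demos/demo_reconstruct.py
from decalib.deca import DECA
from decalib.datasets import datasets
from decalib.utils.config import cfg as deca_cfg

device = "cuda" if torch.cuda.is_available() else "cpu"
deca = DECA(config=deca_cfg, device=device)

# TestData crops and resizes each detected face the same way the demo script does
testdata = datasets.TestData("TestSamples/AFLW2000", iscrop=True)
for i in range(len(testdata)):
    image = testdata[i]["image"].to(device)[None, ...]  # add a batch dimension
    codedict = deca.encode(image)            # image -> FLAME shape/expression/pose, albedo, light codes
    opdict, visdict = deca.decode(codedict)  # codes -> meshes, landmarks and renderings
```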
decalib/DECA/Doc/images/DECA_evaluation_github.png
ADDED (binary image)
decalib/DECA/Doc/images/DECA_performance.png
ADDED (binary image)
decalib/DECA/Doc/images/IMG_0392_inputs_vis.jpg
ADDED (binary image)
decalib/DECA/Doc/images/id04657-PPHljWCZ53c-000565_inputs_inputs_vis.jpg
ADDED (binary image)
decalib/DECA/Doc/images/soubhik.gif
ADDED (binary image, stored with Git LFS)
decalib/DECA/Dockerfile
ADDED
@@ -0,0 +1,25 @@
+FROM pytorch/pytorch:1.6.0-cuda10.1-cudnn7-devel
+
+RUN apt-get update && apt-get install -y wget curl git build-essential
+
+# Miniconda
+# ENV PATH="/root/miniconda3/bin:${PATH}"
+# ARG PATH="/root/miniconda3/bin:${PATH}"
+# RUN wget \
+#     https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
+#     && mkdir /root/.conda \
+#     && bash Miniconda3-latest-Linux-x86_64.sh -b \
+#     && rm -f Miniconda3-latest-Linux-x86_64.sh
+
+# RUN conda create -n xp
+# SHELL ["conda", "run", "--no-capture-output", "-n", "xp", "/bin/bash", "-c"]
+# RUN conda install python=3.7 \
+#     && conda install -c conda-forge jupyterlab \
+#     && conda init bash \
+#     && echo "conda activate xp" >> ~/.bashrc
+
+RUN pip install ipykernel jupyterlab jupyter_http_over_ws \
+    && jupyter serverextension enable --py jupyter_http_over_ws
+
+WORKDIR /content/
+COPY . /content/DECA/
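The image above copies the repo into /content/DECA and installs JupyterLab, so a typical workflow is to build it and expose the notebook server. A hedged sketch driving Docker from Python (the tag deca:latest and port 8888 are our arbitrary choices, not part of this commit):

```python
import subprocess

# Build the image from the repo root (where the Dockerfile lives)
subprocess.run(["docker", "build", "-t", "deca:latest", "."], check=True)

# Run with GPU access and a JupyterLab server reachable on localhost:8888
subprocess.run(
    [
        "docker", "run", "--gpus", "all", "-p", "8888:8888", "-it", "deca:latest",
        "jupyter", "lab", "--ip=0.0.0.0", "--port=8888", "--allow-root",
    ],
    check=True,
)
```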
decalib/DECA/LICENSE
ADDED
@@ -0,0 +1,88 @@
+License
+
+Software Copyright License for non-commercial scientific research purposes
+Please read carefully the following terms and conditions and any accompanying documentation before you download
+and/or use the DECA model, data and software, (the "Model & Software"), including 3D meshes, software, and scripts.
+By downloading and/or using the Model & Software (including downloading, cloning, installing, and any other use
+of this github repository), you acknowledge that you have read these terms and conditions, understand them, and
+agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use
+the Model & Software. Any infringement of the terms of this agreement will automatically terminate your rights
+under this License
+
+Ownership / Licensees
+The Model & Software and the associated materials has been developed at the
+Max Planck Institute for Intelligent Systems (hereinafter "MPI").
+
+Any copyright or patent right is owned by and proprietary material of the
+Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (hereinafter “MPG”; MPI and MPG hereinafter
+collectively “Max-Planck”) hereinafter the “Licensor”.
+
+License Grant
+Licensor grants you (Licensee) personally a single-user, non-exclusive, non-transferable, free of charge right:
+
+• To install the Model & Software on computers owned, leased or otherwise controlled by you and/or your organization.
+• To use the Model & Software for the sole purpose of performing peaceful non-commercial scientific research,
+non-commercial education, or non-commercial artistic projects.
+
+Any other use, in particular any use for commercial, pornographic, military, or surveillance purposes is prohibited.
+This includes, without limitation, incorporation in a commercial product, use in a commercial service,
+or production of other artefacts for commercial purposes.
+
+The Model & Software may not be used to create fake, libelous, misleading, or defamatory content of any kind, excluding
+analyses in peer-reviewed scientific research.
+
+The Model & Software may not be reproduced, modified and/or made available in any form to any third party
+without Max-Planck’s prior written permission.
+
+The Model & Software may not be used for pornographic purposes or to generate pornographic material whether
+commercial or not. This license also prohibits the use of the Model & Software to train methods/algorithms/neural
+networks/etc. for commercial use of any kind. By downloading the Model & Software, you agree not to reverse engineer it.
+
+No Distribution
+The Model & Software and the license herein granted shall not be copied, shared, distributed, re-sold, offered
+for re-sale, transferred or sub-licensed in whole or in part except that you may make one copy for archive
+purposes only.
+
+Disclaimer of Representations and Warranties
+You expressly acknowledge and agree that the Model & Software results from basic research, is provided “AS IS”,
+may contain errors, and that any use of the Model & Software is at your sole risk.
+LICENSOR MAKES NO REPRESENTATIONS
+OR WARRANTIES OF ANY KIND CONCERNING THE MODEL & SOFTWARE, NEITHER EXPRESS NOR IMPLIED, AND THE ABSENCE OF ANY
+LEGAL OR ACTUAL DEFECTS, WHETHER DISCOVERABLE OR NOT. Specifically, and not to limit the foregoing, licensor
+makes no representations or warranties (i) regarding the merchantability or fitness for a particular purpose of
+the Model & Software, (ii) that the use of the Model & Software will not infringe any patents, copyrights or other
+intellectual property rights of a third party, and (iii) that the use of the Model & Software will not cause any
+damage of any kind to you or a third party.
+
+Limitation of Liability
+Because this Model & Software License Agreement qualifies as a donation, according to Section 521 of the German
+Civil Code (Bürgerliches Gesetzbuch – BGB) Licensor as a donor is liable for intent and gross negligence only.
+If the Licensor fraudulently conceals a legal or material defect, they are obliged to compensate the Licensee
+for the resulting damage.
+
+Licensor shall be liable for loss of data only up to the amount of typical recovery costs which would have
+arisen had proper and regular data backup measures been taken. For the avoidance of doubt Licensor shall be
+liable in accordance with the German Product Liability Act in the event of product liability. The foregoing
+applies also to Licensor’s legal representatives or assistants in performance. Any further liability shall
+be excluded. Patent claims generated through the usage of the Model & Software cannot be directed towards the copyright holders.
+The Model & Software is provided in the state of development the licensor defines. If modified or extended by
+Licensee, the Licensor makes no claims about the fitness of the Model & Software and is not responsible
+for any problems such modifications cause.
+
+No Maintenance Services
+You understand and agree that Licensor is under no obligation to provide either maintenance services,
+update services, notices of latent defects, or corrections of defects with regard to the Model & Software.
+Licensor nevertheless reserves the right to update, modify, or discontinue the Model & Software at any time.
+
+Defects of the Model & Software must be notified in writing to the Licensor with a comprehensible description
+of the error symptoms. The notification of the defect should enable the reproduction of the error.
+The Licensee is encouraged to communicate any use, results, modification or publication.
+
+Publications using the Model & Software
+You acknowledge that the Model & Software is a valuable scientific resource and agree to appropriately reference
+the following paper in any publication making use of the Model & Software.
+
+Commercial licensing opportunities
+For commercial uses of the Model & Software, please send email to [email protected]
+
+This Agreement shall be governed by the laws of the Federal Republic of Germany except for the UN Sales Convention.
decalib/DECA/README.md
ADDED
@@ -0,0 +1,169 @@
+# DECA: Detailed Expression Capture and Animation (SIGGRAPH2021)
+
+<p align="center">
+<img src="TestSamples/teaser/results/teaser.gif">
+</p>
+<p align="center">input image, aligned reconstruction, animation with various poses & expressions</p>
+
+[](https://colab.research.google.com/github/YadiraF/DECA/blob/master/Detailed_Expression_Capture_and_Animation.ipynb?authuser=1)
+
+This is the official Pytorch implementation of DECA.
+
+DECA reconstructs a 3D head model with detailed facial geometry from a single input image. The resulting 3D head model can be easily animated. Please refer to the [arXiv paper](https://arxiv.org/abs/2012.04012) for more details.
+
+The main features:
+
+* **Reconstruction:** produces head pose, shape, detailed face geometry, and lighting information from a single image.
+* **Animation:** animate the face with realistic wrinkle deformations.
+* **Robustness:** tested on facial images in unconstrained conditions. Our method is robust to various poses, illuminations and occlusions.
+* **Accurate:** state-of-the-art 3D face shape reconstruction on the [NoW Challenge](https://ringnet.is.tue.mpg.de/challenge) benchmark dataset.
+
+## Getting Started
+Clone the repo:
+```bash
+git clone https://github.com/YadiraF/DECA
+cd DECA
+```
+
+### Requirements
+* Python 3.7 (numpy, skimage, scipy, opencv)
+* PyTorch >= 1.6 (pytorch3d)
+* face-alignment (optional, for detecting faces)
+You can run
+```bash
+pip install -r requirements.txt
+```
+Or use a virtual environment by running
+```bash
+bash install_conda.sh
+```
+For visualization, we use our own rasterizer, which relies on PyTorch JIT-compiled extensions. If a compilation error occurs, you can install [pytorch3d](https://github.com/facebookresearch/pytorch3d/blob/master/INSTALL.md) instead and set --rasterizer_type=pytorch3d when running the demos.
+
+### Usage
+1. Prepare data
+run script:
+```bash
+bash fetch_data.sh
+```
+<!-- or manually download data from [FLAME 2020 model](https://flame.is.tue.mpg.de/download.php) and [DECA trained model](https://drive.google.com/file/d/1rp8kdyLPvErw2dTmqtjISRVvQLj6Yzje/view?usp=sharing), and put them in ./data -->
+(Optional, for albedo)
+follow the instructions for the [Albedo model](https://github.com/TimoBolkart/BFM_to_FLAME) to get 'FLAME_albedo_from_BFM.npz', and put it into ./data
+
+2. Run demos
+a. **reconstruction**
+```bash
+python demos/demo_reconstruct.py -i TestSamples/examples --saveDepth True --saveObj True
+```
+to visualize the predicted 2D landmarks, 3D landmarks (red means non-visible points), coarse geometry, detailed geometry, and depth.
+<p align="center">
+<img src="Doc/images/id04657-PPHljWCZ53c-000565_inputs_inputs_vis.jpg">
+</p>
+<p align="center">
+<img src="Doc/images/IMG_0392_inputs_vis.jpg">
+</p>
+You can also generate an obj file (which can be opened with Meshlab) that includes the texture extracted from the input image.
+
+Please run `python demos/demo_reconstruct.py --help` for more details.
+
+b. **expression transfer**
+```bash
+python demos/demo_transfer.py
+```
+Given an image, you can reconstruct its 3D face, then animate it by transferring expressions from other images.
+Using Meshlab to open the detailed mesh obj file, you can see something like this:
+<p align="center">
+<img src="Doc/images/soubhik.gif">
+</p>
+(Thanks to Soubhik for allowing me to use his face ^_^)
+
+Note that you need to set '--useTex True' to get the full texture.
+
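For readers curious what demo_transfer.py does internally: it encodes both images, then swaps the expression and jaw-pose codes before decoding. A minimal sketch, assuming the decalib API from the notebook example above (variable names are ours, and the slicing assumes the last three pose components hold the jaw pose, as in the released code):

```python
# id_image and exp_image: preprocessed [1, 3, 224, 224] tensors on DECA's device
id_code = deca.encode(id_image)    # source face, keeps identity/camera/light
exp_code = deca.encode(exp_image)  # face that provides the expression

id_code["exp"] = exp_code["exp"]                  # transfer the expression code
id_code["pose"][:, 3:] = exp_code["pose"][:, 3:]  # transfer the jaw pose only

opdict, visdict = deca.decode(id_code)  # re-render the source face with the new expression
```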
+c. for the [teaser gif](https://github.com/YadiraF/DECA/results/teaser.gif) (**reposing** and **animation**)
+```bash
+python demos/demo_teaser.py
+```
+
+More demos and training code are coming soon.
+
+## Evaluation
+DECA (ours) achieves 9% lower mean shape reconstruction error on the [NoW Challenge](https://ringnet.is.tue.mpg.de/challenge) dataset compared to the previous state-of-the-art method.
+The left figure compares the cumulative error of our approach and other recent methods (RingNet and Deng et al. have nearly identical performance, so their curves overlap). Here we use the point-to-surface distance as the error metric, following the NoW Challenge.
+<p align="left">
+<img src="Doc/images/DECA_evaluation_github.png">
+</p>
+
+For more details of the evaluation, please check our [arXiv paper](https://arxiv.org/abs/2012.04012).
+
+## Training
+1. Prepare Training Data
+
+a. Download image data
+In DECA, we use [VGGFace2](https://arxiv.org/pdf/1710.08092.pdf), [BUPT-Balancedface](http://www.whdeng.cn/RFW/Trainingdataste.html) and [VoxCeleb2](https://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox2.html)
+
+b. Prepare labels
+Use [FAN](https://github.com/1adrianb/2D-and-3D-face-alignment) to predict the 68 2D landmarks,
+and [face_segmentation](https://github.com/YuvalNirkin/face_segmentation) to get the skin mask.
+
+c. Modify the dataloader
+Dataloaders for the different datasets are in decalib/datasets; use the right paths for the prepared images and labels.
+
+2. Download the trained face recognition model
+We use the model from [VGGFace2-pytorch](https://github.com/cydonia999/VGGFace2-pytorch) for calculating the identity loss;
+download [resnet50_ft](https://drive.google.com/file/d/1A94PAAnwk6L7hXdBXLFosB_s0SzEhAFU/view)
+and put it into ./data
+
+3. Start training
+
+Train from scratch:
+```bash
+python main_train.py --cfg configs/release_version/deca_pretrain.yml
+python main_train.py --cfg configs/release_version/deca_coarse.yml
+python main_train.py --cfg configs/release_version/deca_detail.yml
+```
+In the yml files, set the right paths for 'output_dir' and 'pretrained_modelpath'.
+You can also use the [released model](https://drive.google.com/file/d/1rp8kdyLPvErw2dTmqtjISRVvQLj6Yzje/view) as the pretrained model, and then skip the pretrain step.
+
+## Related works:
+* for better emotion prediction: [EMOCA](https://github.com/radekd91/emoca)
+* for better skin estimation: [TRUST](https://github.com/HavenFeng/TRUST)
+
+## Citation
+If you find our work useful to your research, please consider citing:
+```
+@inproceedings{DECA:Siggraph2021,
+  title={Learning an Animatable Detailed {3D} Face Model from In-The-Wild Images},
+  author={Feng, Yao and Feng, Haiwen and Black, Michael J. and Bolkart, Timo},
+  journal = {ACM Transactions on Graphics, (Proc. SIGGRAPH)},
+  volume = {40},
+  number = {8},
+  year = {2021},
+  url = {https://doi.org/10.1145/3450626.3459936}
+}
+```
+
+<!-- ## Notes
+1. Training code will also be released in the future. -->
+
+## License
+This code and model are available for non-commercial scientific research purposes as defined in the [LICENSE](https://github.com/YadiraF/DECA/blob/master/LICENSE) file.
+By downloading and using the code and model you agree to the terms in the [LICENSE](https://github.com/YadiraF/DECA/blob/master/LICENSE).
+
+## Acknowledgements
+For functions or scripts that are based on external sources, we acknowledge the origin individually in each file.
+Here are some great resources we benefit from:
+- [FLAME_PyTorch](https://github.com/soubhiksanyal/FLAME_PyTorch) and [TF_FLAME](https://github.com/TimoBolkart/TF_FLAME) for the FLAME model
+- [Pytorch3D](https://pytorch3d.org/), [neural_renderer](https://github.com/daniilidis-group/neural_renderer), [SoftRas](https://github.com/ShichenLiu/SoftRas) for rendering
+- [kornia](https://github.com/kornia/kornia) for image/rotation processing
+- [face-alignment](https://github.com/1adrianb/face-alignment) for cropping
+- [FAN](https://github.com/1adrianb/2D-and-3D-face-alignment) for landmark detection
+- [face_segmentation](https://github.com/YuvalNirkin/face_segmentation) for the skin mask
+- [VGGFace2-pytorch](https://github.com/cydonia999/VGGFace2-pytorch) for the identity loss
+
+We would also like to thank other recent public 3D face reconstruction works that allow us to easily perform quantitative and qualitative comparisons :)
+[RingNet](https://github.com/soubhiksanyal/RingNet),
+[Deep3DFaceReconstruction](https://github.com/microsoft/Deep3DFaceReconstruction/blob/master/renderer/rasterize_triangles.py),
+[Nonlinear_Face_3DMM](https://github.com/tranluan/Nonlinear_Face_3DMM),
+[3DDFA-v2](https://github.com/cleardusk/3DDFA_V2),
+[extreme_3d_faces](https://github.com/anhttran/extreme_3d_faces),
+[facescape](https://github.com/zhuhao-nju/facescape)
+<!-- 3DMMasSTN, DenseReg, 3dmm_cnn, vrn, pix2vertex -->
decalib/DECA/TestSamples/AFLW2000/image00181.jpg
ADDED (binary image)
decalib/DECA/TestSamples/AFLW2000/image00202.jpg
ADDED (binary image)
decalib/DECA/TestSamples/AFLW2000/image00302.jpg
ADDED (binary image)
decalib/DECA/TestSamples/AFLW2000/image00408.jpg
ADDED (binary image)
decalib/DECA/TestSamples/AFLW2000/image00480.jpg
ADDED (binary image)
decalib/DECA/TestSamples/AFLW2000/image02024.jpg
ADDED (binary image)
decalib/DECA/TestSamples/AFLW2000/image02202.jpg
ADDED (binary image)
decalib/DECA/TestSamples/AFLW2000/image02246.jpg
ADDED (binary image)
decalib/DECA/TestSamples/AFLW2000/image04085.jpg
ADDED (binary image)
decalib/DECA/TestSamples/examples/IMG_0392_inputs.jpg
ADDED (binary image)
decalib/DECA/TestSamples/examples/alfw1.png
ADDED (binary image)
decalib/DECA/TestSamples/examples/alfw2.png
ADDED (binary image)
decalib/DECA/TestSamples/examples/id04657-PPHljWCZ53c-000565_inputs_inputs.jpg
ADDED (binary image)
decalib/DECA/TestSamples/examples/id06692-Hlahj5abifM-002721_inputs_inputs.jpg
ADDED (binary image)
decalib/DECA/TestSamples/examples/id08392-DGGTJx470Ag-005451_inputs_inputs.jpg
ADDED (binary image)
decalib/DECA/TestSamples/examples/id08456-SEvpX9LhEP4-000957_inputs_inputs.jpg
ADDED (binary image)
decalib/DECA/TestSamples/examples/image02673.png
ADDED (binary image)
decalib/DECA/TestSamples/examples/image03786.png
ADDED (binary image)
decalib/DECA/TestSamples/exp/0.jpg
ADDED (binary image)
decalib/DECA/TestSamples/exp/1.jpg
ADDED (binary image)
decalib/DECA/TestSamples/exp/2.jpg
ADDED (binary image)
decalib/DECA/TestSamples/exp/3.jpg
ADDED (binary image)
decalib/DECA/TestSamples/exp/4.jpg
ADDED (binary image)
decalib/DECA/TestSamples/exp/5.jpg
ADDED (binary image)
decalib/DECA/TestSamples/exp/6.jpg
ADDED (binary image)
decalib/DECA/TestSamples/exp/7.jpg
ADDED (binary image)
decalib/DECA/TestSamples/exp/8.jpg
ADDED (binary image)
decalib/DECA/TestSamples/teaser/0.jpg
ADDED (binary image, stored with Git LFS)
decalib/DECA/TestSamples/teaser/1.jpg
ADDED (binary image, stored with Git LFS)
decalib/DECA/TestSamples/teaser/2.png
ADDED (binary image)
decalib/DECA/TestSamples/teaser/results/teaser.gif
ADDED (binary image, stored with Git LFS)
decalib/DECA/configs/release_version/deca_coarse.yml
ADDED
@@ -0,0 +1,24 @@
+# '''
+# second step:
+# train the coarse model for 1.5 epochs with a batch size of 32, with 4 images per subject
+# with 𝜆𝑝ℎ𝑜=2.0, 𝜆𝑖𝑑=0.2, 𝜆𝑠𝑐=1.0, 𝜆𝑙𝑚𝑘=1.0, 𝜆𝑒𝑦𝑒=1.0, 𝜆𝜷=1𝑒−4, and 𝜆𝝍=1𝑒−4.
+
+# Time:
+# around 15min/1000iterations
+# in total, needs around
+output_dir: "/ps/scratch/yfeng/Data/Projects-data/DECA-training/training/DECA_release_version/coarse"
+pretrained_modelpath: "/ps/scratch/yfeng/Data/Projects-data/DECA-training/training/DECA_release_version/pretrain/model.tar"
+dataset:
+  batch_size: 8
+  K: 2
+train:
+  resume: True
+  max_epochs: 10
+  max_steps: 200000
+  log_steps: 10
+  vis_steps: 500
+  checkpoint_steps: 1000
+  val_steps: 500
+  eval_steps: 1000
+
+# python main_train_deca_release.py --cfg configs/release_version/deca_coarse.yml
decalib/DECA/configs/release_version/deca_detail.yml
ADDED
@@ -0,0 +1,29 @@
+# '''
+# detail:
+# This is followed by training the detail model (i.e. 𝐸𝑑 and 𝐹𝑑)
+# on VGGFace2 and VoxCeleb2 with a batch size of 6, with
+# 3 images per subject, and parameters 𝜆𝑝ℎ𝑜𝐷 = 2.0, 𝜆𝑚𝑟𝑓 = 5𝑒−2,
+# 𝜆𝑠𝑦𝑚 = 5𝑒−3, 𝜆𝑑𝑐 = 1.0, and 𝜆𝑟𝑒𝑔𝐷 = 5𝑒−3.
+
+# why:
+# '''
+# pretrained_modelpath: '/ps/scratch/yfeng/Data/Projects-data/DECA-training/training/DECA_SIGGRAPH/pretrain/model.tar'
+output_dir: "/ps/scratch/yfeng/Data/Projects-data/DECA-training/training/DECA_release_version/detail"
+pretrained_modelpath: "/ps/scratch/yfeng/Data/Projects-data/DECA-training/training/DECA_release_version/coarse/model.tar"
+dataset:
+  batch_size: 6
+  K: 3
+  training_data: ['vggface2', 'vox2']
+train:
+  train_detail: True
+  resume: True
+  max_epochs: 10
+  max_steps: 1000000
+  log_steps: 10
+  vis_steps: 500
+  checkpoint_steps: 1000
+  val_steps: 500
+  eval_steps: 1000
+
+# python main_train_deca_release.py --cfg configs/release_version/deca_detail.yml
decalib/DECA/configs/release_version/deca_pretrain.yml
ADDED
@@ -0,0 +1,33 @@
+# '''
+# first step:
+# pre-train the coarse model (i.e. 𝐸𝑐) for two epochs with a batch size of 64,
+# with 𝜆𝑙𝑚𝑘=1𝑒−4, 𝜆𝑒𝑦𝑒=1.0, 𝜆𝜷=1𝑒−4, and 𝜆𝝍=1𝑒−4
+
+# Why:
+# training with only the lmk loss gives a good initialization,
+# because the photometric loss needs a good initialization in both regression and optimization,
+# and the photometric loss also needs differentiable rendering, which makes training slow
+#
+#
+# '''
+output_dir: "/ps/scratch/yfeng/Data/Projects-data/DECA-training/training/DECA_release_version/pretrain"
+pretrained_modelpath: ''
+dataset:
+  batch_size: 64
+  K: 1
+loss:
+  photo: 0.
+  id: 0.
+  useSeg: False
+  reg_tex: 0.
+  reg_light: 0.
+  shape_consistency: False
+train:
+  resume: True
+  max_epochs: 10
+  max_steps: 100000
+  log_steps: 10
+  vis_steps: 500
+  checkpoint_steps: 1000
+  val_steps: 500
+  eval_steps: 1000
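All three release configs are plain YAML consumed via yacs, and the stages chain through 'pretrained_modelpath' (pretrain feeds coarse, coarse feeds detail). A small sketch of inspecting that chain, assuming `pip install yacs` and the config paths used in the README:

```python
from yacs.config import CfgNode

stages = [
    "configs/release_version/deca_pretrain.yml",
    "configs/release_version/deca_coarse.yml",
    "configs/release_version/deca_detail.yml",
]
for path in stages:
    with open(path) as f:
        cfg = CfgNode.load_cfg(f)
    # Each stage resumes from the previous stage's model.tar
    print(path, "->", cfg.get("pretrained_modelpath", ""))
```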
decalib/DECA/data/FLAME2020/Readme.pdf
ADDED
Binary file (19.3 kB)
decalib/DECA/data/fixed_displacement_256.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:296d01113d67bdaace6f6fe741f7d855e58dc0707f0bb113758520ffa5d8cb93
+size 524416
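The .npy entries here are Git LFS pointer files: three `key value` lines naming the real blob by SHA-256 and size. A sketch (with a hypothetical helper, not part of the repo) of parsing a pointer and verifying the working-tree file once `git lfs pull` has materialized it:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    """Parse the 'key value' lines of a Git LFS pointer file into a dict."""
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:296d01113d67bdaace6f6fe741f7d855e58dc0707f0bb113758520ffa5d8cb93\n"
    "size 524416\n"
)

# After `git lfs pull`, the working-tree file should match the pointer
data = Path("decalib/DECA/data/fixed_displacement_256.npy").read_bytes()
assert len(data) == int(pointer["size"])
assert hashlib.sha256(data).hexdigest() == pointer["oid"].removeprefix("sha256:")
```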
decalib/DECA/data/head_template.obj
ADDED
The diff for this file is too large to render.
decalib/DECA/data/landmark_embedding.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8095348eeafce5a02f6bd8765146307f9567a3f03b316d788a2e47336d667954
+size 31292
decalib/DECA/data/mean_texture.jpg
ADDED (binary image)