feng2022 commited on
Commit
12d50ff
1 Parent(s): c82ac8e
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. Time_TravelRephotography/models/encoder4editing/.gitignore +133 -0
  2. Time_TravelRephotography/models/encoder4editing/LICENSE +21 -0
  3. Time_TravelRephotography/models/encoder4editing/README.md +143 -0
  4. Time_TravelRephotography/models/encoder4editing/__init__.py +15 -0
  5. Time_TravelRephotography/models/encoder4editing/bash_scripts/inference.sh +15 -0
  6. Time_TravelRephotography/models/encoder4editing/configs/__init__.py +0 -0
  7. Time_TravelRephotography/models/encoder4editing/configs/data_configs.py +41 -0
  8. Time_TravelRephotography/models/encoder4editing/configs/paths_config.py +28 -0
  9. Time_TravelRephotography/models/encoder4editing/configs/transforms_config.py +62 -0
  10. Time_TravelRephotography/models/encoder4editing/criteria/__init__.py +0 -0
  11. Time_TravelRephotography/models/encoder4editing/criteria/id_loss.py +47 -0
  12. Time_TravelRephotography/models/encoder4editing/criteria/lpips/__init__.py +0 -0
  13. Time_TravelRephotography/models/encoder4editing/criteria/lpips/lpips.py +35 -0
  14. Time_TravelRephotography/models/encoder4editing/criteria/lpips/networks.py +96 -0
  15. Time_TravelRephotography/models/encoder4editing/criteria/lpips/utils.py +30 -0
  16. Time_TravelRephotography/models/encoder4editing/criteria/moco_loss.py +71 -0
  17. Time_TravelRephotography/models/encoder4editing/criteria/w_norm.py +14 -0
  18. Time_TravelRephotography/models/encoder4editing/datasets/__init__.py +0 -0
  19. Time_TravelRephotography/models/encoder4editing/datasets/gt_res_dataset.py +32 -0
  20. Time_TravelRephotography/models/encoder4editing/datasets/images_dataset.py +33 -0
  21. Time_TravelRephotography/models/encoder4editing/datasets/inference_dataset.py +25 -0
  22. Time_TravelRephotography/models/encoder4editing/editings/ganspace.py +22 -0
  23. Time_TravelRephotography/models/encoder4editing/editings/ganspace_pca/cars_pca.pt +3 -0
  24. Time_TravelRephotography/models/encoder4editing/editings/ganspace_pca/ffhq_pca.pt +3 -0
  25. Time_TravelRephotography/models/encoder4editing/editings/interfacegan_directions/age.pt +3 -0
  26. Time_TravelRephotography/models/encoder4editing/editings/interfacegan_directions/pose.pt +3 -0
  27. Time_TravelRephotography/models/encoder4editing/editings/interfacegan_directions/smile.pt +3 -0
  28. Time_TravelRephotography/models/encoder4editing/editings/latent_editor.py +45 -0
  29. Time_TravelRephotography/models/encoder4editing/editings/sefa.py +46 -0
  30. Time_TravelRephotography/models/encoder4editing/environment/e4e_env.yaml +73 -0
  31. Time_TravelRephotography/models/encoder4editing/metrics/LEC.py +134 -0
  32. Time_TravelRephotography/models/encoder4editing/models/__init__.py +0 -0
  33. Time_TravelRephotography/models/encoder4editing/models/discriminator.py +20 -0
  34. Time_TravelRephotography/models/encoder4editing/models/encoders/__init__.py +0 -0
  35. Time_TravelRephotography/models/encoder4editing/models/encoders/helpers.py +140 -0
  36. Time_TravelRephotography/models/encoder4editing/models/encoders/model_irse.py +84 -0
  37. Time_TravelRephotography/models/encoder4editing/models/encoders/psp_encoders.py +235 -0
  38. Time_TravelRephotography/models/encoder4editing/models/latent_codes_pool.py +55 -0
  39. Time_TravelRephotography/models/encoder4editing/models/psp.py +100 -0
  40. Time_TravelRephotography/models/encoder4editing/models/stylegan2/__init__.py +0 -0
  41. Time_TravelRephotography/models/encoder4editing/models/stylegan2/model.py +673 -0
  42. Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/__init__.py +2 -0
  43. Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/fused_act.py +85 -0
  44. Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/fused_bias_act.cpp +21 -0
  45. Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/fused_bias_act_kernel.cu +99 -0
  46. Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/upfirdn2d.cpp +23 -0
  47. Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/upfirdn2d.py +184 -0
  48. Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/upfirdn2d_kernel.cu +272 -0
  49. Time_TravelRephotography/models/encoder4editing/options/__init__.py +0 -0
  50. Time_TravelRephotography/models/encoder4editing/options/train_options.py +84 -0
Time_TravelRephotography/models/encoder4editing/.gitignore ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # Jupyter Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # IPython
81
+ profile_default/
82
+ ipython_config.py
83
+
84
+ # pyenv
85
+ .python-version
86
+
87
+ # pipenv
88
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
90
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
91
+ # install all needed dependencies.
92
+ #Pipfile.lock
93
+
94
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95
+ __pypackages__/
96
+
97
+ # Celery stuff
98
+ celerybeat-schedule
99
+ celerybeat.pid
100
+
101
+ # SageMath parsed files
102
+ *.sage.py
103
+
104
+ # Environments
105
+ .env
106
+ .venv
107
+ env/
108
+ venv/
109
+ ENV/
110
+ env.bak/
111
+ venv.bak/
112
+
113
+ # Spyder project settings
114
+ .spyderproject
115
+ .spyproject
116
+
117
+ # Rope project settings
118
+ .ropeproject
119
+
120
+ # mkdocs documentation
121
+ /site
122
+
123
+ # mypy
124
+ .mypy_cache/
125
+ .dmypy.json
126
+ dmypy.json
127
+
128
+ # Pyre type checker
129
+ .pyre/
130
+
131
+ # Custom dataset
132
+ pretrained_models
133
+ results_test
Time_TravelRephotography/models/encoder4editing/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2021 omertov
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
Time_TravelRephotography/models/encoder4editing/README.md ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Designing an Encoder for StyleGAN Image Manipulation (SIGGRAPH 2021)
2
+ <a href="https://arxiv.org/abs/2102.02766"><img src="https://img.shields.io/badge/arXiv-2008.00951-b31b1b.svg"></a>
3
+ <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-yellow.svg"></a>
4
+ [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](http://colab.research.google.com/github/omertov/encoder4editing/blob/main/notebooks/inference_playground.ipynb)
5
+
6
+ > Recently, there has been a surge of diverse methods for performing image editing by employing pre-trained unconditional generators. Applying these methods on real images, however, remains a challenge, as it necessarily requires the inversion of the images into their latent space. To successfully invert a real image, one needs to find a latent code that reconstructs the input image accurately, and more importantly, allows for its meaningful manipulation. In this paper, we carefully study the latent space of StyleGAN, the state-of-the-art unconditional generator. We identify and analyze the existence of a distortion-editability tradeoff and a distortion-perception tradeoff within the StyleGAN latent space. We then suggest two principles for designing encoders in a manner that allows one to control the proximity of the inversions to regions that StyleGAN was originally trained on. We present an encoder based on our two principles that is specifically designed for facilitating editing on real images by balancing these tradeoffs. By evaluating its performance qualitatively and quantitatively on numerous challenging domains, including cars and horses, we show that our inversion method, followed by common editing techniques, achieves superior real-image editing quality, with only a small reconstruction accuracy drop.
7
+
8
+ <p align="center">
9
+ <img src="docs/teaser.jpg" width="800px"/>
10
+ </p>
11
+
12
+ ## Description
13
+ Official Implementation of "<a href="https://arxiv.org/abs/2102.02766">Designing an Encoder for StyleGAN Image Manipulation</a>" paper for both training and evaluation.
14
+ The e4e encoder is specifically designed to complement existing image manipulation techniques performed over StyleGAN's latent space.
15
+
16
+ ## Recent Updates
17
+ `2021.08.17`: Add single style code encoder (use `--encoder_type SingleStyleCodeEncoder`). <br />
18
+ `2021.03.25`: Add pose editing direction.
19
+
20
+ ## Getting Started
21
+ ### Prerequisites
22
+ - Linux or macOS
23
+ - NVIDIA GPU + CUDA CuDNN (CPU may be possible with some modifications, but is not inherently supported)
24
+ - Python 3
25
+
26
+ ### Installation
27
+ - Clone the repository:
28
+ ```
29
+ git clone https://github.com/omertov/encoder4editing.git
30
+ cd encoder4editing
31
+ ```
32
+ - Dependencies:
33
+ We recommend running this repository using [Anaconda](https://docs.anaconda.com/anaconda/install/).
34
+ All dependencies for defining the environment are provided in `environment/e4e_env.yaml`.
35
+
36
+ ### Inference Notebook
37
+ We provide a Jupyter notebook found in `notebooks/inference_playground.ipynb` that allows one to encode and perform several editings on real images using StyleGAN.
38
+
39
+ ### Pretrained Models
40
+ Please download the pre-trained models from the following links. Each e4e model contains the entire pSp framework architecture, including the encoder and decoder weights.
41
+ | Path | Description
42
+ | :--- | :----------
43
+ |[FFHQ Inversion](https://drive.google.com/file/d/1cUv_reLE6k3604or78EranS7XzuVMWeO/view?usp=sharing) | FFHQ e4e encoder.
44
+ |[Cars Inversion](https://drive.google.com/file/d/17faPqBce2m1AQeLCLHUVXaDfxMRU2QcV/view?usp=sharing) | Cars e4e encoder.
45
+ |[Horse Inversion](https://drive.google.com/file/d/1TkLLnuX86B_BMo2ocYD0kX9kWh53rUVX/view?usp=sharing) | Horse e4e encoder.
46
+ |[Church Inversion](https://drive.google.com/file/d/1-L0ZdnQLwtdy6-A_Ccgq5uNJGTqE7qBa/view?usp=sharing) | Church e4e encoder.
47
+
48
+ If you wish to use one of the pretrained models for training or inference, you may do so using the flag `--checkpoint_path`.
49
+
50
+ In addition, we provide various auxiliary models needed for training your own e4e model from scratch.
51
+ | Path | Description
52
+ | :--- | :----------
53
+ |[FFHQ StyleGAN](https://drive.google.com/file/d/1EM87UquaoQmk17Q8d5kYIAHqu0dkYqdT/view?usp=sharing) | StyleGAN model pretrained on FFHQ taken from [rosinality](https://github.com/rosinality/stylegan2-pytorch) with 1024x1024 output resolution.
54
+ |[IR-SE50 Model](https://drive.google.com/file/d/1KW7bjndL3QG3sxBbZxreGHigcCCpsDgn/view?usp=sharing) | Pretrained IR-SE50 model taken from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) for use in our ID loss during training.
55
+ |[MOCOv2 Model](https://drive.google.com/file/d/18rLcNGdteX5LwT7sv_F7HWr12HpVEzVe/view?usp=sharing) | Pretrained ResNet-50 model trained using MOCOv2 for use in our simmilarity loss for domains other then human faces during training.
56
+
57
+ By default, we assume that all auxiliary models are downloaded and saved to the directory `pretrained_models`. However, you may use your own paths by changing the necessary values in `configs/path_configs.py`.
58
+
59
+ ## Training
60
+ To train the e4e encoder, make sure the paths to the required models, as well as training and testing data is configured in `configs/path_configs.py` and `configs/data_configs.py`.
61
+ #### **Training the e4e Encoder**
62
+ ```
63
+ python scripts/train.py \
64
+ --dataset_type cars_encode \
65
+ --exp_dir new/experiment/directory \
66
+ --start_from_latent_avg \
67
+ --use_w_pool \
68
+ --w_discriminator_lambda 0.1 \
69
+ --progressive_start 20000 \
70
+ --id_lambda 0.5 \
71
+ --val_interval 10000 \
72
+ --max_steps 200000 \
73
+ --stylegan_size 512 \
74
+ --stylegan_weights path/to/pretrained/stylegan.pt \
75
+ --workers 8 \
76
+ --batch_size 8 \
77
+ --test_batch_size 4 \
78
+ --test_workers 4
79
+ ```
80
+
81
+ #### Training on your own dataset
82
+ In order to train the e4e encoder on a custom dataset, perform the following adjustments:
83
+ 1. Insert the paths to your train and test data into the `dataset_paths` variable defined in `configs/paths_config.py`:
84
+ ```
85
+ dataset_paths = {
86
+ 'my_train_data': '/path/to/train/images/directory',
87
+ 'my_test_data': '/path/to/test/images/directory'
88
+ }
89
+ ```
90
+ 2. Configure a new dataset under the DATASETS variable defined in `configs/data_configs.py`:
91
+ ```
92
+ DATASETS = {
93
+ 'my_data_encode': {
94
+ 'transforms': transforms_config.EncodeTransforms,
95
+ 'train_source_root': dataset_paths['my_train_data'],
96
+ 'train_target_root': dataset_paths['my_train_data'],
97
+ 'test_source_root': dataset_paths['my_test_data'],
98
+ 'test_target_root': dataset_paths['my_test_data']
99
+ }
100
+ }
101
+ ```
102
+ Refer to `configs/transforms_config.py` for the transformations applied to the train and test images during training.
103
+
104
+ 3. Finally, run a training session with `--dataset_type my_data_encode`.
105
+
106
+ ## Inference
107
+ Having trained your model, you can use `scripts/inference.py` to apply the model on a set of images.
108
+ For example,
109
+ ```
110
+ python scripts/inference.py \
111
+ --images_dir=/path/to/images/directory \
112
+ --save_dir=/path/to/saving/directory \
113
+ path/to/checkpoint.pt
114
+ ```
115
+
116
+ ## Latent Editing Consistency (LEC)
117
+ As described in the paper, we suggest a new metric, Latent Editing Consistency (LEC), for evaluating the encoder's
118
+ performance.
119
+ We provide an example for calculating the metric over the FFHQ StyleGAN using the aging editing direction in
120
+ `metrics/LEC.py`.
121
+
122
+ To run the example:
123
+ ```
124
+ cd metrics
125
+ python LEC.py \
126
+ --images_dir=/path/to/images/directory \
127
+ path/to/checkpoint.pt
128
+ ```
129
+
130
+ ## Acknowledgments
131
+ This code borrows heavily from [pixel2style2pixel](https://github.com/eladrich/pixel2style2pixel)
132
+
133
+ ## Citation
134
+ If you use this code for your research, please cite our paper <a href="https://arxiv.org/abs/2102.02766">Designing an Encoder for StyleGAN Image Manipulation</a>:
135
+
136
+ ```
137
+ @article{tov2021designing,
138
+ title={Designing an Encoder for StyleGAN Image Manipulation},
139
+ author={Tov, Omer and Alaluf, Yuval and Nitzan, Yotam and Patashnik, Or and Cohen-Or, Daniel},
140
+ journal={arXiv preprint arXiv:2102.02766},
141
+ year={2021}
142
+ }
143
+ ```
Time_TravelRephotography/models/encoder4editing/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .utils.model_utils import setup_model
2
+
3
+
4
+ def get_latents(net, x, is_cars=False):
5
+ codes = net.encoder(x)
6
+ if net.opts.start_from_latent_avg:
7
+ if codes.ndim == 2:
8
+ codes = codes + net.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :]
9
+ else:
10
+ codes = codes + net.latent_avg.repeat(codes.shape[0], 1, 1)
11
+ if codes.shape[1] == 18 and is_cars:
12
+ codes = codes[:, :16, :]
13
+ return codes
14
+
15
+
Time_TravelRephotography/models/encoder4editing/bash_scripts/inference.sh ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ set -exo
2
+
3
+ list="$1"
4
+ ckpt="${2:-pretrained_models/e4e_ffhq_encode.pt}"
5
+
6
+ base_dir="$REPHOTO/dataset/historically_interesting/aligned/manual_celebrity_in_19th_century/tier1/${list}/"
7
+ save_dir="results_test/${list}/"
8
+
9
+
10
+ TORCH_EXTENSIONS_DIR=/tmp/torch_extensions
11
+ PYTHONPATH="" \
12
+ python scripts/inference.py \
13
+ --images_dir="${base_dir}" \
14
+ --save_dir="${save_dir}" \
15
+ "${ckpt}"
Time_TravelRephotography/models/encoder4editing/configs/__init__.py ADDED
File without changes
Time_TravelRephotography/models/encoder4editing/configs/data_configs.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from configs import transforms_config
2
+ from configs.paths_config import dataset_paths
3
+
4
+
5
+ DATASETS = {
6
+ 'ffhq_encode': {
7
+ 'transforms': transforms_config.EncodeTransforms,
8
+ 'train_source_root': dataset_paths['ffhq'],
9
+ 'train_target_root': dataset_paths['ffhq'],
10
+ 'test_source_root': dataset_paths['celeba_test'],
11
+ 'test_target_root': dataset_paths['celeba_test'],
12
+ },
13
+ 'cars_encode': {
14
+ 'transforms': transforms_config.CarsEncodeTransforms,
15
+ 'train_source_root': dataset_paths['cars_train'],
16
+ 'train_target_root': dataset_paths['cars_train'],
17
+ 'test_source_root': dataset_paths['cars_test'],
18
+ 'test_target_root': dataset_paths['cars_test'],
19
+ },
20
+ 'horse_encode': {
21
+ 'transforms': transforms_config.EncodeTransforms,
22
+ 'train_source_root': dataset_paths['horse_train'],
23
+ 'train_target_root': dataset_paths['horse_train'],
24
+ 'test_source_root': dataset_paths['horse_test'],
25
+ 'test_target_root': dataset_paths['horse_test'],
26
+ },
27
+ 'church_encode': {
28
+ 'transforms': transforms_config.EncodeTransforms,
29
+ 'train_source_root': dataset_paths['church_train'],
30
+ 'train_target_root': dataset_paths['church_train'],
31
+ 'test_source_root': dataset_paths['church_test'],
32
+ 'test_target_root': dataset_paths['church_test'],
33
+ },
34
+ 'cats_encode': {
35
+ 'transforms': transforms_config.EncodeTransforms,
36
+ 'train_source_root': dataset_paths['cats_train'],
37
+ 'train_target_root': dataset_paths['cats_train'],
38
+ 'test_source_root': dataset_paths['cats_test'],
39
+ 'test_target_root': dataset_paths['cats_test'],
40
+ }
41
+ }
Time_TravelRephotography/models/encoder4editing/configs/paths_config.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ dataset_paths = {
2
+ # Face Datasets (In the paper: FFHQ - train, CelebAHQ - test)
3
+ 'ffhq': '',
4
+ 'celeba_test': '',
5
+
6
+ # Cars Dataset (In the paper: Stanford cars)
7
+ 'cars_train': '',
8
+ 'cars_test': '',
9
+
10
+ # Horse Dataset (In the paper: LSUN Horse)
11
+ 'horse_train': '',
12
+ 'horse_test': '',
13
+
14
+ # Church Dataset (In the paper: LSUN Church)
15
+ 'church_train': '',
16
+ 'church_test': '',
17
+
18
+ # Cats Dataset (In the paper: LSUN Cat)
19
+ 'cats_train': '',
20
+ 'cats_test': ''
21
+ }
22
+
23
+ model_paths = {
24
+ 'stylegan_ffhq': 'pretrained_models/stylegan2-ffhq-config-f.pt',
25
+ 'ir_se50': 'pretrained_models/model_ir_se50.pth',
26
+ 'shape_predictor': 'pretrained_models/shape_predictor_68_face_landmarks.dat',
27
+ 'moco': 'pretrained_models/moco_v2_800ep_pretrain.pth'
28
+ }
Time_TravelRephotography/models/encoder4editing/configs/transforms_config.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+ import torchvision.transforms as transforms
3
+
4
+
5
+ class TransformsConfig(object):
6
+
7
+ def __init__(self, opts):
8
+ self.opts = opts
9
+
10
+ @abstractmethod
11
+ def get_transforms(self):
12
+ pass
13
+
14
+
15
+ class EncodeTransforms(TransformsConfig):
16
+
17
+ def __init__(self, opts):
18
+ super(EncodeTransforms, self).__init__(opts)
19
+
20
+ def get_transforms(self):
21
+ transforms_dict = {
22
+ 'transform_gt_train': transforms.Compose([
23
+ transforms.Resize((256, 256)),
24
+ transforms.RandomHorizontalFlip(0.5),
25
+ transforms.ToTensor(),
26
+ transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
27
+ 'transform_source': None,
28
+ 'transform_test': transforms.Compose([
29
+ transforms.Resize((256, 256)),
30
+ transforms.ToTensor(),
31
+ transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
32
+ 'transform_inference': transforms.Compose([
33
+ transforms.Resize((256, 256)),
34
+ transforms.ToTensor(),
35
+ transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
36
+ }
37
+ return transforms_dict
38
+
39
+
40
+ class CarsEncodeTransforms(TransformsConfig):
41
+
42
+ def __init__(self, opts):
43
+ super(CarsEncodeTransforms, self).__init__(opts)
44
+
45
+ def get_transforms(self):
46
+ transforms_dict = {
47
+ 'transform_gt_train': transforms.Compose([
48
+ transforms.Resize((192, 256)),
49
+ transforms.RandomHorizontalFlip(0.5),
50
+ transforms.ToTensor(),
51
+ transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
52
+ 'transform_source': None,
53
+ 'transform_test': transforms.Compose([
54
+ transforms.Resize((192, 256)),
55
+ transforms.ToTensor(),
56
+ transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
57
+ 'transform_inference': transforms.Compose([
58
+ transforms.Resize((192, 256)),
59
+ transforms.ToTensor(),
60
+ transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
61
+ }
62
+ return transforms_dict
Time_TravelRephotography/models/encoder4editing/criteria/__init__.py ADDED
File without changes
Time_TravelRephotography/models/encoder4editing/criteria/id_loss.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+ from configs.paths_config import model_paths
4
+ from models.encoders.model_irse import Backbone
5
+
6
+
7
+ class IDLoss(nn.Module):
8
+ def __init__(self):
9
+ super(IDLoss, self).__init__()
10
+ print('Loading ResNet ArcFace')
11
+ self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')
12
+ self.facenet.load_state_dict(torch.load(model_paths['ir_se50']))
13
+ self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))
14
+ self.facenet.eval()
15
+ for module in [self.facenet, self.face_pool]:
16
+ for param in module.parameters():
17
+ param.requires_grad = False
18
+
19
+ def extract_feats(self, x):
20
+ x = x[:, :, 35:223, 32:220] # Crop interesting region
21
+ x = self.face_pool(x)
22
+ x_feats = self.facenet(x)
23
+ return x_feats
24
+
25
+ def forward(self, y_hat, y, x):
26
+ n_samples = x.shape[0]
27
+ x_feats = self.extract_feats(x)
28
+ y_feats = self.extract_feats(y) # Otherwise use the feature from there
29
+ y_hat_feats = self.extract_feats(y_hat)
30
+ y_feats = y_feats.detach()
31
+ loss = 0
32
+ sim_improvement = 0
33
+ id_logs = []
34
+ count = 0
35
+ for i in range(n_samples):
36
+ diff_target = y_hat_feats[i].dot(y_feats[i])
37
+ diff_input = y_hat_feats[i].dot(x_feats[i])
38
+ diff_views = y_feats[i].dot(x_feats[i])
39
+ id_logs.append({'diff_target': float(diff_target),
40
+ 'diff_input': float(diff_input),
41
+ 'diff_views': float(diff_views)})
42
+ loss += 1 - diff_target
43
+ id_diff = float(diff_target) - float(diff_views)
44
+ sim_improvement += id_diff
45
+ count += 1
46
+
47
+ return loss / count, sim_improvement / count, id_logs
Time_TravelRephotography/models/encoder4editing/criteria/lpips/__init__.py ADDED
File without changes
Time_TravelRephotography/models/encoder4editing/criteria/lpips/lpips.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from criteria.lpips.networks import get_network, LinLayers
5
+ from criteria.lpips.utils import get_state_dict
6
+
7
+
8
+ class LPIPS(nn.Module):
9
+ r"""Creates a criterion that measures
10
+ Learned Perceptual Image Patch Similarity (LPIPS).
11
+ Arguments:
12
+ net_type (str): the network type to compare the features:
13
+ 'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
14
+ version (str): the version of LPIPS. Default: 0.1.
15
+ """
16
+ def __init__(self, net_type: str = 'alex', version: str = '0.1'):
17
+
18
+ assert version in ['0.1'], 'v0.1 is only supported now'
19
+
20
+ super(LPIPS, self).__init__()
21
+
22
+ # pretrained network
23
+ self.net = get_network(net_type).to("cuda")
24
+
25
+ # linear layers
26
+ self.lin = LinLayers(self.net.n_channels_list).to("cuda")
27
+ self.lin.load_state_dict(get_state_dict(net_type, version))
28
+
29
+ def forward(self, x: torch.Tensor, y: torch.Tensor):
30
+ feat_x, feat_y = self.net(x), self.net(y)
31
+
32
+ diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]
33
+ res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]
34
+
35
+ return torch.sum(torch.cat(res, 0)) / x.shape[0]
Time_TravelRephotography/models/encoder4editing/criteria/lpips/networks.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Sequence
2
+
3
+ from itertools import chain
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from torchvision import models
8
+
9
+ from criteria.lpips.utils import normalize_activation
10
+
11
+
12
+ def get_network(net_type: str):
13
+ if net_type == 'alex':
14
+ return AlexNet()
15
+ elif net_type == 'squeeze':
16
+ return SqueezeNet()
17
+ elif net_type == 'vgg':
18
+ return VGG16()
19
+ else:
20
+ raise NotImplementedError('choose net_type from [alex, squeeze, vgg].')
21
+
22
+
23
+ class LinLayers(nn.ModuleList):
24
+ def __init__(self, n_channels_list: Sequence[int]):
25
+ super(LinLayers, self).__init__([
26
+ nn.Sequential(
27
+ nn.Identity(),
28
+ nn.Conv2d(nc, 1, 1, 1, 0, bias=False)
29
+ ) for nc in n_channels_list
30
+ ])
31
+
32
+ for param in self.parameters():
33
+ param.requires_grad = False
34
+
35
+
36
+ class BaseNet(nn.Module):
37
+ def __init__(self):
38
+ super(BaseNet, self).__init__()
39
+
40
+ # register buffer
41
+ self.register_buffer(
42
+ 'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
43
+ self.register_buffer(
44
+ 'std', torch.Tensor([.458, .448, .450])[None, :, None, None])
45
+
46
+ def set_requires_grad(self, state: bool):
47
+ for param in chain(self.parameters(), self.buffers()):
48
+ param.requires_grad = state
49
+
50
+ def z_score(self, x: torch.Tensor):
51
+ return (x - self.mean) / self.std
52
+
53
+ def forward(self, x: torch.Tensor):
54
+ x = self.z_score(x)
55
+
56
+ output = []
57
+ for i, (_, layer) in enumerate(self.layers._modules.items(), 1):
58
+ x = layer(x)
59
+ if i in self.target_layers:
60
+ output.append(normalize_activation(x))
61
+ if len(output) == len(self.target_layers):
62
+ break
63
+ return output
64
+
65
+
66
+ class SqueezeNet(BaseNet):
67
+ def __init__(self):
68
+ super(SqueezeNet, self).__init__()
69
+
70
+ self.layers = models.squeezenet1_1(True).features
71
+ self.target_layers = [2, 5, 8, 10, 11, 12, 13]
72
+ self.n_channels_list = [64, 128, 256, 384, 384, 512, 512]
73
+
74
+ self.set_requires_grad(False)
75
+
76
+
77
+ class AlexNet(BaseNet):
78
+ def __init__(self):
79
+ super(AlexNet, self).__init__()
80
+
81
+ self.layers = models.alexnet(True).features
82
+ self.target_layers = [2, 5, 8, 10, 12]
83
+ self.n_channels_list = [64, 192, 384, 256, 256]
84
+
85
+ self.set_requires_grad(False)
86
+
87
+
88
+ class VGG16(BaseNet):
89
+ def __init__(self):
90
+ super(VGG16, self).__init__()
91
+
92
+ self.layers = models.vgg16(True).features
93
+ self.target_layers = [4, 9, 16, 23, 30]
94
+ self.n_channels_list = [64, 128, 256, 512, 512]
95
+
96
+ self.set_requires_grad(False)
Time_TravelRephotography/models/encoder4editing/criteria/lpips/utils.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import OrderedDict
2
+
3
+ import torch
4
+
5
+
6
+ def normalize_activation(x, eps=1e-10):
7
+ norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
8
+ return x / (norm_factor + eps)
9
+
10
+
11
+ def get_state_dict(net_type: str = 'alex', version: str = '0.1'):
12
+ # build url
13
+ url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \
14
+ + f'master/lpips/weights/v{version}/{net_type}.pth'
15
+
16
+ # download
17
+ old_state_dict = torch.hub.load_state_dict_from_url(
18
+ url, progress=True,
19
+ map_location=None if torch.cuda.is_available() else torch.device('cpu')
20
+ )
21
+
22
+ # rename keys
23
+ new_state_dict = OrderedDict()
24
+ for key, val in old_state_dict.items():
25
+ new_key = key
26
+ new_key = new_key.replace('lin', '')
27
+ new_key = new_key.replace('model.', '')
28
+ new_state_dict[new_key] = val
29
+
30
+ return new_state_dict
Time_TravelRephotography/models/encoder4editing/criteria/moco_loss.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+ import torch.nn.functional as F
4
+
5
+ from configs.paths_config import model_paths
6
+
7
+
8
+ class MocoLoss(nn.Module):
9
+
10
+ def __init__(self, opts):
11
+ super(MocoLoss, self).__init__()
12
+ print("Loading MOCO model from path: {}".format(model_paths["moco"]))
13
+ self.model = self.__load_model()
14
+ self.model.eval()
15
+ for param in self.model.parameters():
16
+ param.requires_grad = False
17
+
18
+ @staticmethod
19
+ def __load_model():
20
+ import torchvision.models as models
21
+ model = models.__dict__["resnet50"]()
22
+ # freeze all layers but the last fc
23
+ for name, param in model.named_parameters():
24
+ if name not in ['fc.weight', 'fc.bias']:
25
+ param.requires_grad = False
26
+ checkpoint = torch.load(model_paths['moco'], map_location="cpu")
27
+ state_dict = checkpoint['state_dict']
28
+ # rename moco pre-trained keys
29
+ for k in list(state_dict.keys()):
30
+ # retain only encoder_q up to before the embedding layer
31
+ if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
32
+ # remove prefix
33
+ state_dict[k[len("module.encoder_q."):]] = state_dict[k]
34
+ # delete renamed or unused k
35
+ del state_dict[k]
36
+ msg = model.load_state_dict(state_dict, strict=False)
37
+ assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
38
+ # remove output layer
39
+ model = nn.Sequential(*list(model.children())[:-1]).cuda()
40
+ return model
41
+
42
+ def extract_feats(self, x):
43
+ x = F.interpolate(x, size=224)
44
+ x_feats = self.model(x)
45
+ x_feats = nn.functional.normalize(x_feats, dim=1)
46
+ x_feats = x_feats.squeeze()
47
+ return x_feats
48
+
49
+ def forward(self, y_hat, y, x):
50
+ n_samples = x.shape[0]
51
+ x_feats = self.extract_feats(x)
52
+ y_feats = self.extract_feats(y)
53
+ y_hat_feats = self.extract_feats(y_hat)
54
+ y_feats = y_feats.detach()
55
+ loss = 0
56
+ sim_improvement = 0
57
+ sim_logs = []
58
+ count = 0
59
+ for i in range(n_samples):
60
+ diff_target = y_hat_feats[i].dot(y_feats[i])
61
+ diff_input = y_hat_feats[i].dot(x_feats[i])
62
+ diff_views = y_feats[i].dot(x_feats[i])
63
+ sim_logs.append({'diff_target': float(diff_target),
64
+ 'diff_input': float(diff_input),
65
+ 'diff_views': float(diff_views)})
66
+ loss += 1 - diff_target
67
+ sim_diff = float(diff_target) - float(diff_views)
68
+ sim_improvement += sim_diff
69
+ count += 1
70
+
71
+ return loss / count, sim_improvement / count, sim_logs
Time_TravelRephotography/models/encoder4editing/criteria/w_norm.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+
4
+
5
+ class WNormLoss(nn.Module):
6
+
7
+ def __init__(self, start_from_latent_avg=True):
8
+ super(WNormLoss, self).__init__()
9
+ self.start_from_latent_avg = start_from_latent_avg
10
+
11
+ def forward(self, latent, latent_avg=None):
12
+ if self.start_from_latent_avg:
13
+ latent = latent - latent_avg
14
+ return torch.sum(latent.norm(2, dim=(1, 2))) / latent.shape[0]
Time_TravelRephotography/models/encoder4editing/datasets/__init__.py ADDED
File without changes
Time_TravelRephotography/models/encoder4editing/datasets/gt_res_dataset.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python
2
+ # encoding: utf-8
3
+ import os
4
+ from torch.utils.data import Dataset
5
+ from PIL import Image
6
+ import torch
7
+
8
+ class GTResDataset(Dataset):
9
+
10
+ def __init__(self, root_path, gt_dir=None, transform=None, transform_train=None):
11
+ self.pairs = []
12
+ for f in os.listdir(root_path):
13
+ image_path = os.path.join(root_path, f)
14
+ gt_path = os.path.join(gt_dir, f)
15
+ if f.endswith(".jpg") or f.endswith(".png"):
16
+ self.pairs.append([image_path, gt_path.replace('.png', '.jpg'), None])
17
+ self.transform = transform
18
+ self.transform_train = transform_train
19
+
20
+ def __len__(self):
21
+ return len(self.pairs)
22
+
23
+ def __getitem__(self, index):
24
+ from_path, to_path, _ = self.pairs[index]
25
+ from_im = Image.open(from_path).convert('RGB')
26
+ to_im = Image.open(to_path).convert('RGB')
27
+
28
+ if self.transform:
29
+ to_im = self.transform(to_im)
30
+ from_im = self.transform(from_im)
31
+
32
+ return from_im, to_im
Time_TravelRephotography/models/encoder4editing/datasets/images_dataset.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.utils.data import Dataset
2
+ from PIL import Image
3
+ from utils import data_utils
4
+
5
+
6
+ class ImagesDataset(Dataset):
7
+
8
+ def __init__(self, source_root, target_root, opts, target_transform=None, source_transform=None):
9
+ self.source_paths = sorted(data_utils.make_dataset(source_root))
10
+ self.target_paths = sorted(data_utils.make_dataset(target_root))
11
+ self.source_transform = source_transform
12
+ self.target_transform = target_transform
13
+ self.opts = opts
14
+
15
+ def __len__(self):
16
+ return len(self.source_paths)
17
+
18
+ def __getitem__(self, index):
19
+ from_path = self.source_paths[index]
20
+ from_im = Image.open(from_path)
21
+ from_im = from_im.convert('RGB')
22
+
23
+ to_path = self.target_paths[index]
24
+ to_im = Image.open(to_path).convert('RGB')
25
+ if self.target_transform:
26
+ to_im = self.target_transform(to_im)
27
+
28
+ if self.source_transform:
29
+ from_im = self.source_transform(from_im)
30
+ else:
31
+ from_im = to_im
32
+
33
+ return from_im, to_im
Time_TravelRephotography/models/encoder4editing/datasets/inference_dataset.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.utils.data import Dataset
2
+ from PIL import Image
3
+ from utils import data_utils
4
+
5
+
6
+ class InferenceDataset(Dataset):
7
+
8
+ def __init__(self, root, opts, transform=None, preprocess=None):
9
+ self.paths = sorted(data_utils.make_dataset(root))
10
+ self.transform = transform
11
+ self.preprocess = preprocess
12
+ self.opts = opts
13
+
14
+ def __len__(self):
15
+ return len(self.paths)
16
+
17
+ def __getitem__(self, index):
18
+ from_path = self.paths[index]
19
+ if self.preprocess is not None:
20
+ from_im = self.preprocess(from_path)
21
+ else:
22
+ from_im = Image.open(from_path).convert('RGB')
23
+ if self.transform:
24
+ from_im = self.transform(from_im)
25
+ return from_im
Time_TravelRephotography/models/encoder4editing/editings/ganspace.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
+ def edit(latents, pca, edit_directions):
5
+ edit_latents = []
6
+ for latent in latents:
7
+ for pca_idx, start, end, strength in edit_directions:
8
+ delta = get_delta(pca, latent, pca_idx, strength)
9
+ delta_padded = torch.zeros(latent.shape).to('cuda')
10
+ delta_padded[start:end] += delta.repeat(end - start, 1)
11
+ edit_latents.append(latent + delta_padded)
12
+ return torch.stack(edit_latents)
13
+
14
+
15
+ def get_delta(pca, latent, idx, strength):
16
+ # pca: ganspace checkpoint. latent: (16, 512) w+
17
+ w_centered = latent - pca['mean'].to('cuda')
18
+ lat_comp = pca['comp'].to('cuda')
19
+ lat_std = pca['std'].to('cuda')
20
+ w_coord = torch.sum(w_centered[0].reshape(-1)*lat_comp[idx].reshape(-1)) / lat_std[idx]
21
+ delta = (strength - w_coord)*lat_comp[idx]*lat_std[idx]
22
+ return delta
Time_TravelRephotography/models/encoder4editing/editings/ganspace_pca/cars_pca.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5c3bae61ecd85de077fbbf103f5f30cf4b7676fe23a8508166eaf2ce73c8392
3
+ size 167562
Time_TravelRephotography/models/encoder4editing/editings/ganspace_pca/ffhq_pca.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d7f9df1c96180d9026b9cb8d04753579fbf385f321a9d0e263641601c5e5d36
3
+ size 167562
Time_TravelRephotography/models/encoder4editing/editings/interfacegan_directions/age.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50074516b1629707d89b5e43d6b8abd1792212fa3b961a87a11323d6a5222ae0
3
+ size 2808
Time_TravelRephotography/models/encoder4editing/editings/interfacegan_directions/pose.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:736e0eacc8488fa0b020a2e7bd256b957284c364191dfea693705e5d06d43e7d
3
+ size 37624
Time_TravelRephotography/models/encoder4editing/editings/interfacegan_directions/smile.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:817a7e732b59dee9eba862bec8bd7e8373568443bc9f9731a21cf9b0356f0653
3
+ size 2808
Time_TravelRephotography/models/encoder4editing/editings/latent_editor.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import sys
3
+ sys.path.append(".")
4
+ sys.path.append("..")
5
+ from editings import ganspace, sefa
6
+ from utils.common import tensor2im
7
+
8
+
9
+ class LatentEditor(object):
10
+ def __init__(self, stylegan_generator, is_cars=False):
11
+ self.generator = stylegan_generator
12
+ self.is_cars = is_cars # Since the cars StyleGAN output is 384x512, there is a need to crop the 512x512 output.
13
+
14
+ def apply_ganspace(self, latent, ganspace_pca, edit_directions):
15
+ edit_latents = ganspace.edit(latent, ganspace_pca, edit_directions)
16
+ return self._latents_to_image(edit_latents)
17
+
18
+ def apply_interfacegan(self, latent, direction, factor=1, factor_range=None):
19
+ edit_latents = []
20
+ if factor_range is not None: # Apply a range of editing factors. for example, (-5, 5)
21
+ for f in range(*factor_range):
22
+ edit_latent = latent + f * direction
23
+ edit_latents.append(edit_latent)
24
+ edit_latents = torch.cat(edit_latents)
25
+ else:
26
+ edit_latents = latent + factor * direction
27
+ return self._latents_to_image(edit_latents)
28
+
29
+ def apply_sefa(self, latent, indices=[2, 3, 4, 5], **kwargs):
30
+ edit_latents = sefa.edit(self.generator, latent, indices, **kwargs)
31
+ return self._latents_to_image(edit_latents)
32
+
33
+ # Currently, in order to apply StyleFlow editings, one should run inference,
34
+ # save the latent codes and load them form the official StyleFlow repository.
35
+ # def apply_styleflow(self):
36
+ # pass
37
+
38
+ def _latents_to_image(self, latents):
39
+ with torch.no_grad():
40
+ images, _ = self.generator([latents], randomize_noise=False, input_is_latent=True)
41
+ if self.is_cars:
42
+ images = images[:, :, 64:448, :] # 512x512 -> 384x512
43
+ horizontal_concat_image = torch.cat(list(images), 2)
44
+ final_image = tensor2im(horizontal_concat_image)
45
+ return final_image
Time_TravelRephotography/models/encoder4editing/editings/sefa.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ from tqdm import tqdm
4
+
5
+
6
+ def edit(generator, latents, indices, semantics=1, start_distance=-15.0, end_distance=15.0, num_samples=1, step=11):
7
+
8
+ layers, boundaries, values = factorize_weight(generator, indices)
9
+ codes = latents.detach().cpu().numpy() # (1,18,512)
10
+
11
+ # Generate visualization pages.
12
+ distances = np.linspace(start_distance, end_distance, step)
13
+ num_sam = num_samples
14
+ num_sem = semantics
15
+
16
+ edited_latents = []
17
+ for sem_id in tqdm(range(num_sem), desc='Semantic ', leave=False):
18
+ boundary = boundaries[sem_id:sem_id + 1]
19
+ for sam_id in tqdm(range(num_sam), desc='Sample ', leave=False):
20
+ code = codes[sam_id:sam_id + 1]
21
+ for col_id, d in enumerate(distances, start=1):
22
+ temp_code = code.copy()
23
+ temp_code[:, layers, :] += boundary * d
24
+ edited_latents.append(torch.from_numpy(temp_code).float().cuda())
25
+ return torch.cat(edited_latents)
26
+
27
+
28
+ def factorize_weight(g_ema, layers='all'):
29
+
30
+ weights = []
31
+ if layers == 'all' or 0 in layers:
32
+ weight = g_ema.conv1.conv.modulation.weight.T
33
+ weights.append(weight.cpu().detach().numpy())
34
+
35
+ if layers == 'all':
36
+ layers = list(range(g_ema.num_layers - 1))
37
+ else:
38
+ layers = [l - 1 for l in layers if l != 0]
39
+
40
+ for idx in layers:
41
+ weight = g_ema.convs[idx].conv.modulation.weight.T
42
+ weights.append(weight.cpu().detach().numpy())
43
+ weight = np.concatenate(weights, axis=1).astype(np.float32)
44
+ weight = weight / np.linalg.norm(weight, axis=0, keepdims=True)
45
+ eigen_values, eigen_vectors = np.linalg.eig(weight.dot(weight.T))
46
+ return layers, eigen_vectors.T, eigen_values
Time_TravelRephotography/models/encoder4editing/environment/e4e_env.yaml ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: e4e_env
2
+ channels:
3
+ - conda-forge
4
+ - defaults
5
+ dependencies:
6
+ - _libgcc_mutex=0.1=main
7
+ - ca-certificates=2020.4.5.1=hecc5488_0
8
+ - certifi=2020.4.5.1=py36h9f0ad1d_0
9
+ - libedit=3.1.20181209=hc058e9b_0
10
+ - libffi=3.2.1=hd88cf55_4
11
+ - libgcc-ng=9.1.0=hdf63c60_0
12
+ - libstdcxx-ng=9.1.0=hdf63c60_0
13
+ - ncurses=6.2=he6710b0_1
14
+ - ninja=1.10.0=hc9558a2_0
15
+ - openssl=1.1.1g=h516909a_0
16
+ - pip=20.0.2=py36_3
17
+ - python=3.6.7=h0371630_0
18
+ - python_abi=3.6=1_cp36m
19
+ - readline=7.0=h7b6447c_5
20
+ - setuptools=46.4.0=py36_0
21
+ - sqlite=3.31.1=h62c20be_1
22
+ - tk=8.6.8=hbc83047_0
23
+ - wheel=0.34.2=py36_0
24
+ - xz=5.2.5=h7b6447c_0
25
+ - zlib=1.2.11=h7b6447c_3
26
+ - pip:
27
+ - absl-py==0.9.0
28
+ - cachetools==4.1.0
29
+ - chardet==3.0.4
30
+ - cycler==0.10.0
31
+ - decorator==4.4.2
32
+ - future==0.18.2
33
+ - google-auth==1.15.0
34
+ - google-auth-oauthlib==0.4.1
35
+ - grpcio==1.29.0
36
+ - idna==2.9
37
+ - imageio==2.8.0
38
+ - importlib-metadata==1.6.0
39
+ - kiwisolver==1.2.0
40
+ - markdown==3.2.2
41
+ - matplotlib==3.2.1
42
+ - mxnet==1.6.0
43
+ - networkx==2.4
44
+ - numpy==1.18.4
45
+ - oauthlib==3.1.0
46
+ - opencv-python==4.2.0.34
47
+ - pillow==7.1.2
48
+ - protobuf==3.12.1
49
+ - pyasn1==0.4.8
50
+ - pyasn1-modules==0.2.8
51
+ - pyparsing==2.4.7
52
+ - python-dateutil==2.8.1
53
+ - pytorch-lightning==0.7.1
54
+ - pywavelets==1.1.1
55
+ - requests==2.23.0
56
+ - requests-oauthlib==1.3.0
57
+ - rsa==4.0
58
+ - scikit-image==0.17.2
59
+ - scipy==1.4.1
60
+ - six==1.15.0
61
+ - tensorboard==2.2.1
62
+ - tensorboard-plugin-wit==1.6.0.post3
63
+ - tensorboardx==1.9
64
+ - tifffile==2020.5.25
65
+ - torch==1.6.0
66
+ - torchvision==0.7.1
67
+ - tqdm==4.46.0
68
+ - urllib3==1.25.9
69
+ - werkzeug==1.0.1
70
+ - zipp==3.1.0
71
+ - pyaml
72
+ prefix: ~/anaconda3/envs/e4e_env
73
+
Time_TravelRephotography/models/encoder4editing/metrics/LEC.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import argparse
3
+ import torch
4
+ import numpy as np
5
+ from torch.utils.data import DataLoader
6
+
7
+ sys.path.append(".")
8
+ sys.path.append("..")
9
+
10
+ from configs import data_configs
11
+ from datasets.images_dataset import ImagesDataset
12
+ from utils.model_utils import setup_model
13
+
14
+
15
+ class LEC:
16
+ def __init__(self, net, is_cars=False):
17
+ """
18
+ Latent Editing Consistency metric as proposed in the main paper.
19
+ :param net: e4e model loaded over the pSp framework.
20
+ :param is_cars: An indication as to whether or not to crop the middle of the StyleGAN's output images.
21
+ """
22
+ self.net = net
23
+ self.is_cars = is_cars
24
+
25
+ def _encode(self, images):
26
+ """
27
+ Encodes the given images into StyleGAN's latent space.
28
+ :param images: Tensor of shape NxCxHxW representing the images to be encoded.
29
+ :return: Tensor of shape NxKx512 representing the latent space embeddings of the given image (in W(K, *) space).
30
+ """
31
+ codes = self.net.encoder(images)
32
+ assert codes.ndim == 3, f"Invalid latent codes shape, should be NxKx512 but is {codes.shape}"
33
+ # normalize with respect to the center of an average face
34
+ if self.net.opts.start_from_latent_avg:
35
+ codes = codes + self.net.latent_avg.repeat(codes.shape[0], 1, 1)
36
+ return codes
37
+
38
+ def _generate(self, codes):
39
+ """
40
+ Generate the StyleGAN2 images of the given codes
41
+ :param codes: Tensor of shape NxKx512 representing the StyleGAN's latent codes (in W(K, *) space).
42
+ :return: Tensor of shape NxCxHxW representing the generated images.
43
+ """
44
+ images, _ = self.net.decoder([codes], input_is_latent=True, randomize_noise=False, return_latents=True)
45
+ images = self.net.face_pool(images)
46
+ if self.is_cars:
47
+ images = images[:, :, 32:224, :]
48
+ return images
49
+
50
+ @staticmethod
51
+ def _filter_outliers(arr):
52
+ arr = np.array(arr)
53
+
54
+ lo = np.percentile(arr, 1, interpolation="lower")
55
+ hi = np.percentile(arr, 99, interpolation="higher")
56
+ return np.extract(
57
+ np.logical_and(lo <= arr, arr <= hi), arr
58
+ )
59
+
60
+ def calculate_metric(self, data_loader, edit_function, inverse_edit_function):
61
+ """
62
+ Calculate the LEC metric score.
63
+ :param data_loader: An iterable that returns a tuple of (images, _), similar to the training data loader.
64
+ :param edit_function: A function that receives latent codes and performs a semantically meaningful edit in the
65
+ latent space.
66
+ :param inverse_edit_function: A function that receives latent codes and performs the inverse edit of the
67
+ `edit_function` parameter.
68
+ :return: The LEC metric score.
69
+ """
70
+ distances = []
71
+ with torch.no_grad():
72
+ for batch in data_loader:
73
+ x, _ = batch
74
+ inputs = x.to(device).float()
75
+
76
+ codes = self._encode(inputs)
77
+ edited_codes = edit_function(codes)
78
+ edited_image = self._generate(edited_codes)
79
+ edited_image_inversion_codes = self._encode(edited_image)
80
+ inverse_edit_codes = inverse_edit_function(edited_image_inversion_codes)
81
+
82
+ dist = (codes - inverse_edit_codes).norm(2, dim=(1, 2)).mean()
83
+ distances.append(dist.to("cpu").numpy())
84
+
85
+ distances = self._filter_outliers(distances)
86
+ return distances.mean()
87
+
88
+
89
+ if __name__ == "__main__":
90
+ device = "cuda"
91
+
92
+ parser = argparse.ArgumentParser(description="LEC metric calculator")
93
+
94
+ parser.add_argument("--batch", type=int, default=8, help="batch size for the models")
95
+ parser.add_argument("--images_dir", type=str, default=None,
96
+ help="Path to the images directory on which we calculate the LEC score")
97
+ parser.add_argument("ckpt", metavar="CHECKPOINT", help="path to the model checkpoints")
98
+
99
+ args = parser.parse_args()
100
+ print(args)
101
+
102
+ net, opts = setup_model(args.ckpt, device)
103
+ dataset_args = data_configs.DATASETS[opts.dataset_type]
104
+ transforms_dict = dataset_args['transforms'](opts).get_transforms()
105
+
106
+ images_directory = dataset_args['test_source_root'] if args.images_dir is None else args.images_dir
107
+ test_dataset = ImagesDataset(source_root=images_directory,
108
+ target_root=images_directory,
109
+ source_transform=transforms_dict['transform_source'],
110
+ target_transform=transforms_dict['transform_test'],
111
+ opts=opts)
112
+
113
+ data_loader = DataLoader(test_dataset,
114
+ batch_size=args.batch,
115
+ shuffle=False,
116
+ num_workers=2,
117
+ drop_last=True)
118
+
119
+ print(f'dataset length: {len(test_dataset)}')
120
+
121
+ # In the following example, we are using an InterfaceGAN based editing to calculate the LEC metric.
122
+ # Change the provided example according to your domain and needs.
123
+ direction = torch.load('../editings/interfacegan_directions/age.pt').to(device)
124
+
125
+ def edit_func_example(codes):
126
+ return codes + 3 * direction
127
+
128
+
129
+ def inverse_edit_func_example(codes):
130
+ return codes - 3 * direction
131
+
132
+ lec = LEC(net, is_cars='car' in opts.dataset_type)
133
+ result = lec.calculate_metric(data_loader, edit_func_example, inverse_edit_func_example)
134
+ print(f"LEC: {result}")
Time_TravelRephotography/models/encoder4editing/models/__init__.py ADDED
File without changes
Time_TravelRephotography/models/encoder4editing/models/discriminator.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch import nn
2
+
3
+
4
+ class LatentCodesDiscriminator(nn.Module):
5
+ def __init__(self, style_dim, n_mlp):
6
+ super().__init__()
7
+
8
+ self.style_dim = style_dim
9
+
10
+ layers = []
11
+ for i in range(n_mlp-1):
12
+ layers.append(
13
+ nn.Linear(style_dim, style_dim)
14
+ )
15
+ layers.append(nn.LeakyReLU(0.2))
16
+ layers.append(nn.Linear(512, 1))
17
+ self.mlp = nn.Sequential(*layers)
18
+
19
+ def forward(self, w):
20
+ return self.mlp(w)
Time_TravelRephotography/models/encoder4editing/models/encoders/__init__.py ADDED
File without changes
Time_TravelRephotography/models/encoder4editing/models/encoders/helpers.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import namedtuple
2
+ import torch
3
+ import torch.nn.functional as F
4
+ from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
5
+
6
+ """
7
+ ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
8
+ """
9
+
10
+
11
+ class Flatten(Module):
12
+ def forward(self, input):
13
+ return input.view(input.size(0), -1)
14
+
15
+
16
+ def l2_norm(input, axis=1):
17
+ norm = torch.norm(input, 2, axis, True)
18
+ output = torch.div(input, norm)
19
+ return output
20
+
21
+
22
+ class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
23
+ """ A named tuple describing a ResNet block. """
24
+
25
+
26
+ def get_block(in_channel, depth, num_units, stride=2):
27
+ return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
28
+
29
+
30
+ def get_blocks(num_layers):
31
+ if num_layers == 50:
32
+ blocks = [
33
+ get_block(in_channel=64, depth=64, num_units=3),
34
+ get_block(in_channel=64, depth=128, num_units=4),
35
+ get_block(in_channel=128, depth=256, num_units=14),
36
+ get_block(in_channel=256, depth=512, num_units=3)
37
+ ]
38
+ elif num_layers == 100:
39
+ blocks = [
40
+ get_block(in_channel=64, depth=64, num_units=3),
41
+ get_block(in_channel=64, depth=128, num_units=13),
42
+ get_block(in_channel=128, depth=256, num_units=30),
43
+ get_block(in_channel=256, depth=512, num_units=3)
44
+ ]
45
+ elif num_layers == 152:
46
+ blocks = [
47
+ get_block(in_channel=64, depth=64, num_units=3),
48
+ get_block(in_channel=64, depth=128, num_units=8),
49
+ get_block(in_channel=128, depth=256, num_units=36),
50
+ get_block(in_channel=256, depth=512, num_units=3)
51
+ ]
52
+ else:
53
+ raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
54
+ return blocks
55
+
56
+
57
+ class SEModule(Module):
58
+ def __init__(self, channels, reduction):
59
+ super(SEModule, self).__init__()
60
+ self.avg_pool = AdaptiveAvgPool2d(1)
61
+ self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
62
+ self.relu = ReLU(inplace=True)
63
+ self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
64
+ self.sigmoid = Sigmoid()
65
+
66
+ def forward(self, x):
67
+ module_input = x
68
+ x = self.avg_pool(x)
69
+ x = self.fc1(x)
70
+ x = self.relu(x)
71
+ x = self.fc2(x)
72
+ x = self.sigmoid(x)
73
+ return module_input * x
74
+
75
+
76
+ class bottleneck_IR(Module):
77
+ def __init__(self, in_channel, depth, stride):
78
+ super(bottleneck_IR, self).__init__()
79
+ if in_channel == depth:
80
+ self.shortcut_layer = MaxPool2d(1, stride)
81
+ else:
82
+ self.shortcut_layer = Sequential(
83
+ Conv2d(in_channel, depth, (1, 1), stride, bias=False),
84
+ BatchNorm2d(depth)
85
+ )
86
+ self.res_layer = Sequential(
87
+ BatchNorm2d(in_channel),
88
+ Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
89
+ Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
90
+ )
91
+
92
+ def forward(self, x):
93
+ shortcut = self.shortcut_layer(x)
94
+ res = self.res_layer(x)
95
+ return res + shortcut
96
+
97
+
98
+ class bottleneck_IR_SE(Module):
99
+ def __init__(self, in_channel, depth, stride):
100
+ super(bottleneck_IR_SE, self).__init__()
101
+ if in_channel == depth:
102
+ self.shortcut_layer = MaxPool2d(1, stride)
103
+ else:
104
+ self.shortcut_layer = Sequential(
105
+ Conv2d(in_channel, depth, (1, 1), stride, bias=False),
106
+ BatchNorm2d(depth)
107
+ )
108
+ self.res_layer = Sequential(
109
+ BatchNorm2d(in_channel),
110
+ Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
111
+ PReLU(depth),
112
+ Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
113
+ BatchNorm2d(depth),
114
+ SEModule(depth, 16)
115
+ )
116
+
117
+ def forward(self, x):
118
+ shortcut = self.shortcut_layer(x)
119
+ res = self.res_layer(x)
120
+ return res + shortcut
121
+
122
+
123
+ def _upsample_add(x, y):
124
+ """Upsample and add two feature maps.
125
+ Args:
126
+ x: (Variable) top feature map to be upsampled.
127
+ y: (Variable) lateral feature map.
128
+ Returns:
129
+ (Variable) added feature map.
130
+ Note that in PyTorch, when the input size is odd, a feature map upsampled
+ with `F.interpolate(..., scale_factor=2, mode='nearest')`
+ may not match the lateral feature map size.
133
+ e.g.
134
+ original input size: [N,_,15,15] ->
135
+ conv2d feature map size: [N,_,8,8] ->
136
+ upsampled feature map size: [N,_,16,16]
137
+ So we choose bilinear upsample which supports arbitrary output sizes.
138
+ """
139
+ _, _, H, W = y.size()
140
+ return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
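A quick, hedged sanity check (not part of the diff itself) of what get_blocks(50) expands to: the four stages yield 24 bottleneck configs in total, which is why the encoders added later in this commit tap the body at module indices 6, 20 and 23 (the last unit of stages 2, 3 and 4). The import path is an assumption and presumes the encoder4editing root is on PYTHONPATH.

from models.encoders.helpers import get_blocks  # path assumed from this repo layout

blocks = get_blocks(50)
units_per_stage = [len(stage) for stage in blocks]
print(units_per_stage)       # [3, 4, 14, 3]
print(sum(units_per_stage))  # 24 bottleneck units stacked into the IR / IR-SE body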
Time_TravelRephotography/models/encoder4editing/models/encoders/model_irse.py ADDED
@@ -0,0 +1,84 @@
1
+ from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
2
+ from models.encoders.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
3
+
4
+ """
5
+ Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
6
+ """
7
+
8
+
9
+ class Backbone(Module):
10
+ def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
11
+ super(Backbone, self).__init__()
12
+ assert input_size in [112, 224], "input_size should be 112 or 224"
13
+ assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
14
+ assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
15
+ blocks = get_blocks(num_layers)
16
+ if mode == 'ir':
17
+ unit_module = bottleneck_IR
18
+ elif mode == 'ir_se':
19
+ unit_module = bottleneck_IR_SE
20
+ self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
21
+ BatchNorm2d(64),
22
+ PReLU(64))
23
+ if input_size == 112:
24
+ self.output_layer = Sequential(BatchNorm2d(512),
25
+ Dropout(drop_ratio),
26
+ Flatten(),
27
+ Linear(512 * 7 * 7, 512),
28
+ BatchNorm1d(512, affine=affine))
29
+ else:
30
+ self.output_layer = Sequential(BatchNorm2d(512),
31
+ Dropout(drop_ratio),
32
+ Flatten(),
33
+ Linear(512 * 14 * 14, 512),
34
+ BatchNorm1d(512, affine=affine))
35
+
36
+ modules = []
37
+ for block in blocks:
38
+ for bottleneck in block:
39
+ modules.append(unit_module(bottleneck.in_channel,
40
+ bottleneck.depth,
41
+ bottleneck.stride))
42
+ self.body = Sequential(*modules)
43
+
44
+ def forward(self, x):
45
+ x = self.input_layer(x)
46
+ x = self.body(x)
47
+ x = self.output_layer(x)
48
+ return l2_norm(x)
49
+
50
+
51
+ def IR_50(input_size):
52
+ """Constructs a ir-50 model."""
53
+ model = Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
54
+ return model
55
+
56
+
57
+ def IR_101(input_size):
58
+ """Constructs a ir-101 model."""
59
+ model = Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
60
+ return model
61
+
62
+
63
+ def IR_152(input_size):
64
+ """Constructs a ir-152 model."""
65
+ model = Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
66
+ return model
67
+
68
+
69
+ def IR_SE_50(input_size):
70
+ """Constructs a ir_se-50 model."""
71
+ model = Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
72
+ return model
73
+
74
+
75
+ def IR_SE_101(input_size):
76
+ """Constructs a ir_se-101 model."""
77
+ model = Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
78
+ return model
79
+
80
+
81
+ def IR_SE_152(input_size):
82
+ """Constructs a ir_se-152 model."""
83
+ model = Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
84
+ return model
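A hedged usage sketch for the backbones above: each constructor returns a Backbone whose forward yields L2-normalized 512-d embeddings. The import path is assumed (encoder4editing root on PYTHONPATH, matching the absolute import at the top of this file), and the weights are random unless an ir_se50 checkpoint is loaded.

import torch
from models.encoders.model_irse import IR_SE_50  # path assumed

backbone = IR_SE_50(input_size=112).eval()
with torch.no_grad():
    emb = backbone(torch.randn(2, 3, 112, 112))
print(emb.shape)        # torch.Size([2, 512])
print(emb.norm(dim=1))  # ~1.0 per row, because forward ends with l2_norm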
Time_TravelRephotography/models/encoder4editing/models/encoders/psp_encoders.py ADDED
@@ -0,0 +1,235 @@
1
+ from enum import Enum
2
+ import math
3
+ import numpy as np
4
+ import torch
5
+ from torch import nn
6
+ from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module
7
+
8
+ from .helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE, _upsample_add
9
+ from ..stylegan2.model import EqualLinear
10
+
11
+
12
+ class ProgressiveStage(Enum):
13
+ WTraining = 0
14
+ Delta1Training = 1
15
+ Delta2Training = 2
16
+ Delta3Training = 3
17
+ Delta4Training = 4
18
+ Delta5Training = 5
19
+ Delta6Training = 6
20
+ Delta7Training = 7
21
+ Delta8Training = 8
22
+ Delta9Training = 9
23
+ Delta10Training = 10
24
+ Delta11Training = 11
25
+ Delta12Training = 12
26
+ Delta13Training = 13
27
+ Delta14Training = 14
28
+ Delta15Training = 15
29
+ Delta16Training = 16
30
+ Delta17Training = 17
31
+ Inference = 18
32
+
33
+
34
+ class GradualStyleBlock(Module):
35
+ def __init__(self, in_c, out_c, spatial):
36
+ super(GradualStyleBlock, self).__init__()
37
+ self.out_c = out_c
38
+ self.spatial = spatial
39
+ num_pools = int(np.log2(spatial))
40
+ modules = []
41
+ modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),
42
+ nn.LeakyReLU()]
43
+ for i in range(num_pools - 1):
44
+ modules += [
45
+ Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1),
46
+ nn.LeakyReLU()
47
+ ]
48
+ self.convs = nn.Sequential(*modules)
49
+ self.linear = EqualLinear(out_c, out_c, lr_mul=1)
50
+
51
+ def forward(self, x):
52
+ x = self.convs(x)
53
+ x = x.view(-1, self.out_c)
54
+ x = self.linear(x)
55
+ return x
56
+
57
+
58
+ class GradualStyleEncoder(Module):
59
+ def __init__(self, num_layers, mode='ir', opts=None):
60
+ super(GradualStyleEncoder, self).__init__()
61
+ assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
62
+ assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
63
+ blocks = get_blocks(num_layers)
64
+ if mode == 'ir':
65
+ unit_module = bottleneck_IR
66
+ elif mode == 'ir_se':
67
+ unit_module = bottleneck_IR_SE
68
+ self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
69
+ BatchNorm2d(64),
70
+ PReLU(64))
71
+ modules = []
72
+ for block in blocks:
73
+ for bottleneck in block:
74
+ modules.append(unit_module(bottleneck.in_channel,
75
+ bottleneck.depth,
76
+ bottleneck.stride))
77
+ self.body = Sequential(*modules)
78
+
79
+ self.styles = nn.ModuleList()
80
+ log_size = int(math.log(opts.stylegan_size, 2))
81
+ self.style_count = 2 * log_size - 2
82
+ self.coarse_ind = 3
83
+ self.middle_ind = 7
84
+ for i in range(self.style_count):
85
+ if i < self.coarse_ind:
86
+ style = GradualStyleBlock(512, 512, 16)
87
+ elif i < self.middle_ind:
88
+ style = GradualStyleBlock(512, 512, 32)
89
+ else:
90
+ style = GradualStyleBlock(512, 512, 64)
91
+ self.styles.append(style)
92
+ self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
93
+ self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
94
+
95
+ def forward(self, x):
96
+ x = self.input_layer(x)
97
+
98
+ latents = []
99
+ modulelist = list(self.body._modules.values())
100
+ for i, l in enumerate(modulelist):
101
+ x = l(x)
102
+ if i == 6:
103
+ c1 = x
104
+ elif i == 20:
105
+ c2 = x
106
+ elif i == 23:
107
+ c3 = x
108
+
109
+ for j in range(self.coarse_ind):
110
+ latents.append(self.styles[j](c3))
111
+
112
+ p2 = _upsample_add(c3, self.latlayer1(c2))
113
+ for j in range(self.coarse_ind, self.middle_ind):
114
+ latents.append(self.styles[j](p2))
115
+
116
+ p1 = _upsample_add(p2, self.latlayer2(c1))
117
+ for j in range(self.middle_ind, self.style_count):
118
+ latents.append(self.styles[j](p1))
119
+
120
+ out = torch.stack(latents, dim=1)
121
+ return out
122
+
123
+
124
+ class Encoder4Editing(Module):
125
+ def __init__(self, num_layers, mode='ir', opts=None):
126
+ super(Encoder4Editing, self).__init__()
127
+ assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
128
+ assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
129
+ blocks = get_blocks(num_layers)
130
+ if mode == 'ir':
131
+ unit_module = bottleneck_IR
132
+ elif mode == 'ir_se':
133
+ unit_module = bottleneck_IR_SE
134
+ self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
135
+ BatchNorm2d(64),
136
+ PReLU(64))
137
+ modules = []
138
+ for block in blocks:
139
+ for bottleneck in block:
140
+ modules.append(unit_module(bottleneck.in_channel,
141
+ bottleneck.depth,
142
+ bottleneck.stride))
143
+ self.body = Sequential(*modules)
144
+
145
+ self.styles = nn.ModuleList()
146
+ log_size = int(math.log(opts.stylegan_size, 2))
147
+ self.style_count = 2 * log_size - 2
148
+ self.coarse_ind = 3
149
+ self.middle_ind = 7
150
+
151
+ for i in range(self.style_count):
152
+ if i < self.coarse_ind:
153
+ style = GradualStyleBlock(512, 512, 16)
154
+ elif i < self.middle_ind:
155
+ style = GradualStyleBlock(512, 512, 32)
156
+ else:
157
+ style = GradualStyleBlock(512, 512, 64)
158
+ self.styles.append(style)
159
+
160
+ self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
161
+ self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
162
+
163
+ self.progressive_stage = ProgressiveStage.Inference
164
+
165
+ def get_deltas_starting_dimensions(self):
166
+ ''' Get a list of the initial dimension of every delta from which it is applied '''
167
+ return list(range(self.style_count)) # Each dimension has a delta applied to it
168
+
169
+ def set_progressive_stage(self, new_stage: ProgressiveStage):
170
+ self.progressive_stage = new_stage
171
+ print('Changed progressive stage to: ', new_stage)
172
+
173
+ def forward(self, x):
174
+ x = self.input_layer(x)
175
+
176
+ modulelist = list(self.body._modules.values())
177
+ for i, l in enumerate(modulelist):
178
+ x = l(x)
179
+ if i == 6:
180
+ c1 = x
181
+ elif i == 20:
182
+ c2 = x
183
+ elif i == 23:
184
+ c3 = x
185
+
186
+ # Infer main W and duplicate it
187
+ w0 = self.styles[0](c3)
188
+ w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2)
189
+ stage = self.progressive_stage.value
190
+ features = c3
191
+ for i in range(1, min(stage + 1, self.style_count)): # Infer additional deltas
192
+ if i == self.coarse_ind:
193
+ p2 = _upsample_add(c3, self.latlayer1(c2)) # FPN's middle features
194
+ features = p2
195
+ elif i == self.middle_ind:
196
+ p1 = _upsample_add(p2, self.latlayer2(c1)) # FPN's fine features
197
+ features = p1
198
+ delta_i = self.styles[i](features)
199
+ w[:, i] += delta_i
200
+ return w
201
+
202
+
203
+ class BackboneEncoderUsingLastLayerIntoW(Module):
204
+ def __init__(self, num_layers, mode='ir', opts=None):
205
+ super(BackboneEncoderUsingLastLayerIntoW, self).__init__()
206
+ print('Using BackboneEncoderUsingLastLayerIntoW')
207
+ assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
208
+ assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
209
+ blocks = get_blocks(num_layers)
210
+ if mode == 'ir':
211
+ unit_module = bottleneck_IR
212
+ elif mode == 'ir_se':
213
+ unit_module = bottleneck_IR_SE
214
+ self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
215
+ BatchNorm2d(64),
216
+ PReLU(64))
217
+ self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
218
+ self.linear = EqualLinear(512, 512, lr_mul=1)
219
+ modules = []
220
+ for block in blocks:
221
+ for bottleneck in block:
222
+ modules.append(unit_module(bottleneck.in_channel,
223
+ bottleneck.depth,
224
+ bottleneck.stride))
225
+ self.body = Sequential(*modules)
226
+ log_size = int(math.log(opts.stylegan_size, 2))
227
+ self.style_count = 2 * log_size - 2
228
+
229
+ def forward(self, x):
230
+ x = self.input_layer(x)
231
+ x = self.body(x)
232
+ x = self.output_pool(x)
233
+ x = x.view(-1, 512)
234
+ x = self.linear(x)
235
+ return x.repeat(self.style_count, 1, 1).permute(1, 0, 2)
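A hedged instantiation sketch for Encoder4Editing. The constructor only reads opts.stylegan_size, and 1024 gives 18 style vectors; a 256x256 input is assumed because the GradualStyleBlock spatial sizes (16/32/64) match the feature maps of a 256px input. Note that importing this module pulls in stylegan2.model, which JIT-compiles the fused CUDA ops, so a working CUDA toolchain is assumed; the import path itself is also an assumption.

from argparse import Namespace
import torch
from models.encoders.psp_encoders import Encoder4Editing  # package path assumed

opts = Namespace(stylegan_size=1024)  # the only field the constructor reads
encoder = Encoder4Editing(50, 'ir_se', opts).eval()
with torch.no_grad():
    w_plus = encoder(torch.randn(1, 3, 256, 256))
print(w_plus.shape)  # torch.Size([1, 18, 512]): w0 repeated, plus per-layer deltas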
Time_TravelRephotography/models/encoder4editing/models/latent_codes_pool.py ADDED
@@ -0,0 +1,55 @@
1
+ import random
2
+ import torch
3
+
4
+
5
+ class LatentCodesPool:
6
+ """This class implements latent codes buffer that stores previously generated w latent codes.
7
+ This buffer enables us to update discriminators using a history of generated w's
8
+ rather than the ones produced by the latest encoder.
9
+ """
10
+
11
+ def __init__(self, pool_size):
+ """Initialize the LatentCodesPool class.
+ Parameters:
+ pool_size (int) -- the size of the latent-code buffer; if pool_size=0, no buffer is created
15
+ """
16
+ self.pool_size = pool_size
17
+ if self.pool_size > 0: # create an empty pool
18
+ self.num_ws = 0
19
+ self.ws = []
20
+
21
+ def query(self, ws):
22
+ """Return w's from the pool.
23
+ Parameters:
24
+ ws: the latest generated w's from the generator
25
+ Returns w's from the buffer.
26
+ With probability 0.5 the buffer returns the input w's; with probability 0.5 it
+ returns w's previously stored in the buffer and inserts the current w's into the buffer.
29
+ """
30
+ if self.pool_size == 0: # if the buffer size is 0, do nothing
31
+ return ws
32
+ return_ws = []
33
+ for w in ws: # ws.shape: (batch, 512) or (batch, n_latent, 512)
34
+ # w = torch.unsqueeze(image.data, 0)
35
+ if w.ndim == 2:
36
+ i = random.randint(0, len(w) - 1) # pick a random latent index from this W+ code
37
+ w = w[i]
38
+ self.handle_w(w, return_ws)
39
+ return_ws = torch.stack(return_ws, 0) # collect all the latent codes and return
40
+ return return_ws
41
+
42
+ def handle_w(self, w, return_ws):
43
+ if self.num_ws < self.pool_size: # if the buffer is not full; keep inserting current codes to the buffer
44
+ self.num_ws = self.num_ws + 1
45
+ self.ws.append(w)
46
+ return_ws.append(w)
47
+ else:
48
+ p = random.uniform(0, 1)
49
+ if p > 0.5: # by 50% chance, the buffer will return a previously stored latent code, and insert the current code into the buffer
50
+ random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
51
+ tmp = self.ws[random_id].clone()
52
+ self.ws[random_id] = w
53
+ return_ws.append(tmp)
54
+ else: # by another 50% chance, the buffer will return the current code
55
+ return_ws.append(w)
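A minimal, hedged sketch of the buffer above (module path assumed). Note that for W+ input of shape (batch, n_latent, 512) each code has ndim == 2 inside query, so the pool keeps one randomly chosen latent index per code and the returned tensor is (batch, 512).

import torch
from models.latent_codes_pool import LatentCodesPool  # path assumed

pool = LatentCodesPool(pool_size=50)
w_batch = torch.randn(8, 18, 512)   # W+ codes straight from the encoder
w_for_disc = pool.query(w_batch)    # mixes fresh codes with previously stored ones
print(w_for_disc.shape)             # torch.Size([8, 512])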
Time_TravelRephotography/models/encoder4editing/models/psp.py ADDED
@@ -0,0 +1,100 @@
1
+ import matplotlib
2
+
3
+ matplotlib.use('Agg')
4
+ import torch
5
+ from torch import nn
6
+ from .encoders import psp_encoders
7
+ from .stylegan2.model import Generator
8
+ from ..configs.paths_config import model_paths
9
+
10
+
11
+ def get_keys(d, name):
12
+ if 'state_dict' in d:
13
+ d = d['state_dict']
14
+ d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name}
15
+ return d_filt
16
+
17
+
18
+ class pSp(nn.Module):
19
+
20
+ def __init__(self, opts):
21
+ super(pSp, self).__init__()
22
+ self.opts = opts
23
+ # Define architecture
24
+ self.encoder = self.set_encoder()
25
+ self.decoder = Generator(opts.stylegan_size, 512, 8, channel_multiplier=2)
26
+ self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
27
+ # Load weights if needed
28
+ self.load_weights()
29
+
30
+ def set_encoder(self):
31
+ if self.opts.encoder_type == 'GradualStyleEncoder':
32
+ encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts)
33
+ elif self.opts.encoder_type == 'Encoder4Editing':
34
+ encoder = psp_encoders.Encoder4Editing(50, 'ir_se', self.opts)
35
+ elif self.opts.encoder_type == 'SingleStyleCodeEncoder':
36
+ encoder = psp_encoders.BackboneEncoderUsingLastLayerIntoW(50, 'ir_se', self.opts)
37
+ else:
38
+ raise Exception('{} is not a valid encoder type'.format(self.opts.encoder_type))
39
+ return encoder
40
+
41
+ def load_weights(self):
42
+ if self.opts.checkpoint_path is not None:
43
+ print('Loading e4e over the pSp framework from checkpoint: {}'.format(self.opts.checkpoint_path))
44
+ ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')
45
+ self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True)
46
+ self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True)
47
+ self.__load_latent_avg(ckpt)
48
+ else:
49
+ print('Loading encoders weights from irse50!')
50
+ encoder_ckpt = torch.load(model_paths['ir_se50'])
51
+ self.encoder.load_state_dict(encoder_ckpt, strict=False)
52
+ print('Loading decoder weights from pretrained!')
53
+ ckpt = torch.load(self.opts.stylegan_weights)
54
+ self.decoder.load_state_dict(ckpt['g_ema'], strict=False)
55
+ self.__load_latent_avg(ckpt, repeat=self.encoder.style_count)
56
+
57
+ def forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True,
58
+ inject_latent=None, return_latents=False, alpha=None):
59
+ if input_code:
60
+ codes = x
61
+ else:
62
+ codes = self.encoder(x)
63
+ # normalize with respect to the center of an average face
64
+ if self.opts.start_from_latent_avg:
65
+ if codes.ndim == 2:
66
+ codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :]
67
+ else:
68
+ codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)
69
+
70
+ if latent_mask is not None:
71
+ for i in latent_mask:
72
+ if inject_latent is not None:
73
+ if alpha is not None:
74
+ codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i]
75
+ else:
76
+ codes[:, i] = inject_latent[:, i]
77
+ else:
78
+ codes[:, i] = 0
79
+
80
+ input_is_latent = not input_code
81
+ images, result_latent = self.decoder([codes],
82
+ input_is_latent=input_is_latent,
83
+ randomize_noise=randomize_noise,
84
+ return_latents=return_latents)
85
+
86
+ if resize:
87
+ images = self.face_pool(images)
88
+
89
+ if return_latents:
90
+ return images, result_latent
91
+ else:
92
+ return images
93
+
94
+ def __load_latent_avg(self, ckpt, repeat=None):
95
+ if 'latent_avg' in ckpt:
96
+ self.latent_avg = ckpt['latent_avg'].to(self.opts.device)
97
+ if repeat is not None:
98
+ self.latent_avg = self.latent_avg.repeat(repeat, 1)
99
+ else:
100
+ self.latent_avg = None
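A hedged sketch of the opts namespace pSp expects when restoring an e4e checkpoint. Every field below is read in __init__, load_weights or forward; the checkpoint path is a placeholder (no weights ship with this commit), a CUDA device plus the compiled StyleGAN2 ops are assumed, and the relative imports in this file require it to be imported as part of the encoder4editing package.

from argparse import Namespace
import torch
from encoder4editing.models.psp import pSp  # package path assumed

opts = Namespace(
    encoder_type='Encoder4Editing',
    stylegan_size=1024,
    checkpoint_path='pretrained/e4e_ffhq_encode.pt',  # placeholder path, not shipped here
    start_from_latent_avg=True,
    device='cuda',
)
net = pSp(opts).to(opts.device).eval()
with torch.no_grad():
    images, latents = net(torch.randn(1, 3, 256, 256, device=opts.device),
                          randomize_noise=False, return_latents=True)
print(images.shape, latents.shape)  # (1, 3, 256, 256) after face_pool; (1, 18, 512)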
Time_TravelRephotography/models/encoder4editing/models/stylegan2/__init__.py ADDED
File without changes
Time_TravelRephotography/models/encoder4editing/models/stylegan2/model.py ADDED
@@ -0,0 +1,673 @@
1
+ import math
2
+ import random
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+
7
+ from .op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
8
+
9
+
10
+ class PixelNorm(nn.Module):
11
+ def __init__(self):
12
+ super().__init__()
13
+
14
+ def forward(self, input):
15
+ return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
16
+
17
+
18
+ def make_kernel(k):
19
+ k = torch.tensor(k, dtype=torch.float32)
20
+
21
+ if k.ndim == 1:
22
+ k = k[None, :] * k[:, None]
23
+
24
+ k /= k.sum()
25
+
26
+ return k
27
+
28
+
29
+ class Upsample(nn.Module):
30
+ def __init__(self, kernel, factor=2):
31
+ super().__init__()
32
+
33
+ self.factor = factor
34
+ kernel = make_kernel(kernel) * (factor ** 2)
35
+ self.register_buffer('kernel', kernel)
36
+
37
+ p = kernel.shape[0] - factor
38
+
39
+ pad0 = (p + 1) // 2 + factor - 1
40
+ pad1 = p // 2
41
+
42
+ self.pad = (pad0, pad1)
43
+
44
+ def forward(self, input):
45
+ out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
46
+
47
+ return out
48
+
49
+
50
+ class Downsample(nn.Module):
51
+ def __init__(self, kernel, factor=2):
52
+ super().__init__()
53
+
54
+ self.factor = factor
55
+ kernel = make_kernel(kernel)
56
+ self.register_buffer('kernel', kernel)
57
+
58
+ p = kernel.shape[0] - factor
59
+
60
+ pad0 = (p + 1) // 2
61
+ pad1 = p // 2
62
+
63
+ self.pad = (pad0, pad1)
64
+
65
+ def forward(self, input):
66
+ out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
67
+
68
+ return out
69
+
70
+
71
+ class Blur(nn.Module):
72
+ def __init__(self, kernel, pad, upsample_factor=1):
73
+ super().__init__()
74
+
75
+ kernel = make_kernel(kernel)
76
+
77
+ if upsample_factor > 1:
78
+ kernel = kernel * (upsample_factor ** 2)
79
+
80
+ self.register_buffer('kernel', kernel)
81
+
82
+ self.pad = pad
83
+
84
+ def forward(self, input):
85
+ out = upfirdn2d(input, self.kernel, pad=self.pad)
86
+
87
+ return out
88
+
89
+
90
+ class EqualConv2d(nn.Module):
91
+ def __init__(
92
+ self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
93
+ ):
94
+ super().__init__()
95
+
96
+ self.weight = nn.Parameter(
97
+ torch.randn(out_channel, in_channel, kernel_size, kernel_size)
98
+ )
99
+ self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
100
+
101
+ self.stride = stride
102
+ self.padding = padding
103
+
104
+ if bias:
105
+ self.bias = nn.Parameter(torch.zeros(out_channel))
106
+
107
+ else:
108
+ self.bias = None
109
+
110
+ def forward(self, input):
111
+ out = F.conv2d(
112
+ input,
113
+ self.weight * self.scale,
114
+ bias=self.bias,
115
+ stride=self.stride,
116
+ padding=self.padding,
117
+ )
118
+
119
+ return out
120
+
121
+ def __repr__(self):
122
+ return (
123
+ f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
124
+ f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
125
+ )
126
+
127
+
128
+ class EqualLinear(nn.Module):
129
+ def __init__(
130
+ self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
131
+ ):
132
+ super().__init__()
133
+
134
+ self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
135
+
136
+ if bias:
137
+ self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
138
+
139
+ else:
140
+ self.bias = None
141
+
142
+ self.activation = activation
143
+
144
+ self.scale = (1 / math.sqrt(in_dim)) * lr_mul
145
+ self.lr_mul = lr_mul
146
+
147
+ def forward(self, input):
148
+ if self.activation:
149
+ out = F.linear(input, self.weight * self.scale)
150
+ out = fused_leaky_relu(out, self.bias * self.lr_mul)
151
+
152
+ else:
153
+ out = F.linear(
154
+ input, self.weight * self.scale, bias=self.bias * self.lr_mul
155
+ )
156
+
157
+ return out
158
+
159
+ def __repr__(self):
160
+ return (
161
+ f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
162
+ )
163
+
164
+
165
+ class ScaledLeakyReLU(nn.Module):
166
+ def __init__(self, negative_slope=0.2):
167
+ super().__init__()
168
+
169
+ self.negative_slope = negative_slope
170
+
171
+ def forward(self, input):
172
+ out = F.leaky_relu(input, negative_slope=self.negative_slope)
173
+
174
+ return out * math.sqrt(2)
175
+
176
+
177
+ class ModulatedConv2d(nn.Module):
178
+ def __init__(
179
+ self,
180
+ in_channel,
181
+ out_channel,
182
+ kernel_size,
183
+ style_dim,
184
+ demodulate=True,
185
+ upsample=False,
186
+ downsample=False,
187
+ blur_kernel=[1, 3, 3, 1],
188
+ ):
189
+ super().__init__()
190
+
191
+ self.eps = 1e-8
192
+ self.kernel_size = kernel_size
193
+ self.in_channel = in_channel
194
+ self.out_channel = out_channel
195
+ self.upsample = upsample
196
+ self.downsample = downsample
197
+
198
+ if upsample:
199
+ factor = 2
200
+ p = (len(blur_kernel) - factor) - (kernel_size - 1)
201
+ pad0 = (p + 1) // 2 + factor - 1
202
+ pad1 = p // 2 + 1
203
+
204
+ self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
205
+
206
+ if downsample:
207
+ factor = 2
208
+ p = (len(blur_kernel) - factor) + (kernel_size - 1)
209
+ pad0 = (p + 1) // 2
210
+ pad1 = p // 2
211
+
212
+ self.blur = Blur(blur_kernel, pad=(pad0, pad1))
213
+
214
+ fan_in = in_channel * kernel_size ** 2
215
+ self.scale = 1 / math.sqrt(fan_in)
216
+ self.padding = kernel_size // 2
217
+
218
+ self.weight = nn.Parameter(
219
+ torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
220
+ )
221
+
222
+ self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
223
+
224
+ self.demodulate = demodulate
225
+
226
+ def __repr__(self):
227
+ return (
228
+ f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
229
+ f'upsample={self.upsample}, downsample={self.downsample})'
230
+ )
231
+
232
+ def forward(self, input, style):
233
+ batch, in_channel, height, width = input.shape
234
+
235
+ style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
236
+ weight = self.scale * self.weight * style
237
+
238
+ if self.demodulate:
239
+ demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
240
+ weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
241
+
242
+ weight = weight.view(
243
+ batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
244
+ )
245
+
246
+ if self.upsample:
247
+ input = input.view(1, batch * in_channel, height, width)
248
+ weight = weight.view(
249
+ batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
250
+ )
251
+ weight = weight.transpose(1, 2).reshape(
252
+ batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
253
+ )
254
+ out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
255
+ _, _, height, width = out.shape
256
+ out = out.view(batch, self.out_channel, height, width)
257
+ out = self.blur(out)
258
+
259
+ elif self.downsample:
260
+ input = self.blur(input)
261
+ _, _, height, width = input.shape
262
+ input = input.view(1, batch * in_channel, height, width)
263
+ out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
264
+ _, _, height, width = out.shape
265
+ out = out.view(batch, self.out_channel, height, width)
266
+
267
+ else:
268
+ input = input.view(1, batch * in_channel, height, width)
269
+ out = F.conv2d(input, weight, padding=self.padding, groups=batch)
270
+ _, _, height, width = out.shape
271
+ out = out.view(batch, self.out_channel, height, width)
272
+
273
+ return out
274
+
275
+
276
+ class NoiseInjection(nn.Module):
277
+ def __init__(self):
278
+ super().__init__()
279
+
280
+ self.weight = nn.Parameter(torch.zeros(1))
281
+
282
+ def forward(self, image, noise=None):
283
+ if noise is None:
284
+ batch, _, height, width = image.shape
285
+ noise = image.new_empty(batch, 1, height, width).normal_()
286
+
287
+ return image + self.weight * noise
288
+
289
+
290
+ class ConstantInput(nn.Module):
291
+ def __init__(self, channel, size=4):
292
+ super().__init__()
293
+
294
+ self.input = nn.Parameter(torch.randn(1, channel, size, size))
295
+
296
+ def forward(self, input):
297
+ batch = input.shape[0]
298
+ out = self.input.repeat(batch, 1, 1, 1)
299
+
300
+ return out
301
+
302
+
303
+ class StyledConv(nn.Module):
304
+ def __init__(
305
+ self,
306
+ in_channel,
307
+ out_channel,
308
+ kernel_size,
309
+ style_dim,
310
+ upsample=False,
311
+ blur_kernel=[1, 3, 3, 1],
312
+ demodulate=True,
313
+ ):
314
+ super().__init__()
315
+
316
+ self.conv = ModulatedConv2d(
317
+ in_channel,
318
+ out_channel,
319
+ kernel_size,
320
+ style_dim,
321
+ upsample=upsample,
322
+ blur_kernel=blur_kernel,
323
+ demodulate=demodulate,
324
+ )
325
+
326
+ self.noise = NoiseInjection()
327
+ # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
328
+ # self.activate = ScaledLeakyReLU(0.2)
329
+ self.activate = FusedLeakyReLU(out_channel)
330
+
331
+ def forward(self, input, style, noise=None):
332
+ out = self.conv(input, style)
333
+ out = self.noise(out, noise=noise)
334
+ # out = out + self.bias
335
+ out = self.activate(out)
336
+
337
+ return out
338
+
339
+
340
+ class ToRGB(nn.Module):
341
+ def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
342
+ super().__init__()
343
+
344
+ if upsample:
345
+ self.upsample = Upsample(blur_kernel)
346
+
347
+ self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
348
+ self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
349
+
350
+ def forward(self, input, style, skip=None):
351
+ out = self.conv(input, style)
352
+ out = out + self.bias
353
+
354
+ if skip is not None:
355
+ skip = self.upsample(skip)
356
+
357
+ out = out + skip
358
+
359
+ return out
360
+
361
+
362
+ class Generator(nn.Module):
363
+ def __init__(
364
+ self,
365
+ size,
366
+ style_dim,
367
+ n_mlp,
368
+ channel_multiplier=2,
369
+ blur_kernel=[1, 3, 3, 1],
370
+ lr_mlp=0.01,
371
+ ):
372
+ super().__init__()
373
+
374
+ self.size = size
375
+
376
+ self.style_dim = style_dim
377
+
378
+ layers = [PixelNorm()]
379
+
380
+ for i in range(n_mlp):
381
+ layers.append(
382
+ EqualLinear(
383
+ style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
384
+ )
385
+ )
386
+
387
+ self.style = nn.Sequential(*layers)
388
+
389
+ self.channels = {
390
+ 4: 512,
391
+ 8: 512,
392
+ 16: 512,
393
+ 32: 512,
394
+ 64: 256 * channel_multiplier,
395
+ 128: 128 * channel_multiplier,
396
+ 256: 64 * channel_multiplier,
397
+ 512: 32 * channel_multiplier,
398
+ 1024: 16 * channel_multiplier,
399
+ }
400
+
401
+ self.input = ConstantInput(self.channels[4])
402
+ self.conv1 = StyledConv(
403
+ self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
404
+ )
405
+ self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
406
+
407
+ self.log_size = int(math.log(size, 2))
408
+ self.num_layers = (self.log_size - 2) * 2 + 1
409
+
410
+ self.convs = nn.ModuleList()
411
+ self.upsamples = nn.ModuleList()
412
+ self.to_rgbs = nn.ModuleList()
413
+ self.noises = nn.Module()
414
+
415
+ in_channel = self.channels[4]
416
+
417
+ for layer_idx in range(self.num_layers):
418
+ res = (layer_idx + 5) // 2
419
+ shape = [1, 1, 2 ** res, 2 ** res]
420
+ self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
421
+
422
+ for i in range(3, self.log_size + 1):
423
+ out_channel = self.channels[2 ** i]
424
+
425
+ self.convs.append(
426
+ StyledConv(
427
+ in_channel,
428
+ out_channel,
429
+ 3,
430
+ style_dim,
431
+ upsample=True,
432
+ blur_kernel=blur_kernel,
433
+ )
434
+ )
435
+
436
+ self.convs.append(
437
+ StyledConv(
438
+ out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
439
+ )
440
+ )
441
+
442
+ self.to_rgbs.append(ToRGB(out_channel, style_dim))
443
+
444
+ in_channel = out_channel
445
+
446
+ self.n_latent = self.log_size * 2 - 2
447
+
448
+ def make_noise(self):
449
+ device = self.input.input.device
450
+
451
+ noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
452
+
453
+ for i in range(3, self.log_size + 1):
454
+ for _ in range(2):
455
+ noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
456
+
457
+ return noises
458
+
459
+ def mean_latent(self, n_latent):
460
+ latent_in = torch.randn(
461
+ n_latent, self.style_dim, device=self.input.input.device
462
+ )
463
+ latent = self.style(latent_in).mean(0, keepdim=True)
464
+
465
+ return latent
466
+
467
+ def get_latent(self, input):
468
+ return self.style(input)
469
+
470
+ def forward(
471
+ self,
472
+ styles,
473
+ return_latents=False,
474
+ return_features=False,
475
+ inject_index=None,
476
+ truncation=1,
477
+ truncation_latent=None,
478
+ input_is_latent=False,
479
+ noise=None,
480
+ randomize_noise=True,
481
+ ):
482
+ if not input_is_latent:
483
+ styles = [self.style(s) for s in styles]
484
+
485
+ if noise is None:
486
+ if randomize_noise:
487
+ noise = [None] * self.num_layers
488
+ else:
489
+ noise = [
490
+ getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
491
+ ]
492
+
493
+ if truncation < 1:
494
+ style_t = []
495
+
496
+ for style in styles:
497
+ style_t.append(
498
+ truncation_latent + truncation * (style - truncation_latent)
499
+ )
500
+
501
+ styles = style_t
502
+
503
+ if len(styles) < 2:
504
+ inject_index = self.n_latent
505
+
506
+ if styles[0].ndim < 3:
507
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
508
+ else:
509
+ latent = styles[0]
510
+
511
+ else:
512
+ if inject_index is None:
513
+ inject_index = random.randint(1, self.n_latent - 1)
514
+
515
+ latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
516
+ latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
517
+
518
+ latent = torch.cat([latent, latent2], 1)
519
+
520
+ out = self.input(latent)
521
+ out = self.conv1(out, latent[:, 0], noise=noise[0])
522
+
523
+ skip = self.to_rgb1(out, latent[:, 1])
524
+
525
+ i = 1
526
+ for conv1, conv2, noise1, noise2, to_rgb in zip(
527
+ self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
528
+ ):
529
+ out = conv1(out, latent[:, i], noise=noise1)
530
+ out = conv2(out, latent[:, i + 1], noise=noise2)
531
+ skip = to_rgb(out, latent[:, i + 2], skip)
532
+
533
+ i += 2
534
+
535
+ image = skip
536
+
537
+ if return_latents:
538
+ return image, latent
539
+ elif return_features:
540
+ return image, out
541
+ else:
542
+ return image, None
543
+
544
+
545
+ class ConvLayer(nn.Sequential):
546
+ def __init__(
547
+ self,
548
+ in_channel,
549
+ out_channel,
550
+ kernel_size,
551
+ downsample=False,
552
+ blur_kernel=[1, 3, 3, 1],
553
+ bias=True,
554
+ activate=True,
555
+ ):
556
+ layers = []
557
+
558
+ if downsample:
559
+ factor = 2
560
+ p = (len(blur_kernel) - factor) + (kernel_size - 1)
561
+ pad0 = (p + 1) // 2
562
+ pad1 = p // 2
563
+
564
+ layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
565
+
566
+ stride = 2
567
+ self.padding = 0
568
+
569
+ else:
570
+ stride = 1
571
+ self.padding = kernel_size // 2
572
+
573
+ layers.append(
574
+ EqualConv2d(
575
+ in_channel,
576
+ out_channel,
577
+ kernel_size,
578
+ padding=self.padding,
579
+ stride=stride,
580
+ bias=bias and not activate,
581
+ )
582
+ )
583
+
584
+ if activate:
585
+ if bias:
586
+ layers.append(FusedLeakyReLU(out_channel))
587
+
588
+ else:
589
+ layers.append(ScaledLeakyReLU(0.2))
590
+
591
+ super().__init__(*layers)
592
+
593
+
594
+ class ResBlock(nn.Module):
595
+ def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
596
+ super().__init__()
597
+
598
+ self.conv1 = ConvLayer(in_channel, in_channel, 3)
599
+ self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
600
+
601
+ self.skip = ConvLayer(
602
+ in_channel, out_channel, 1, downsample=True, activate=False, bias=False
603
+ )
604
+
605
+ def forward(self, input):
606
+ out = self.conv1(input)
607
+ out = self.conv2(out)
608
+
609
+ skip = self.skip(input)
610
+ out = (out + skip) / math.sqrt(2)
611
+
612
+ return out
613
+
614
+
615
+ class Discriminator(nn.Module):
616
+ def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
617
+ super().__init__()
618
+
619
+ channels = {
620
+ 4: 512,
621
+ 8: 512,
622
+ 16: 512,
623
+ 32: 512,
624
+ 64: 256 * channel_multiplier,
625
+ 128: 128 * channel_multiplier,
626
+ 256: 64 * channel_multiplier,
627
+ 512: 32 * channel_multiplier,
628
+ 1024: 16 * channel_multiplier,
629
+ }
630
+
631
+ convs = [ConvLayer(3, channels[size], 1)]
632
+
633
+ log_size = int(math.log(size, 2))
634
+
635
+ in_channel = channels[size]
636
+
637
+ for i in range(log_size, 2, -1):
638
+ out_channel = channels[2 ** (i - 1)]
639
+
640
+ convs.append(ResBlock(in_channel, out_channel, blur_kernel))
641
+
642
+ in_channel = out_channel
643
+
644
+ self.convs = nn.Sequential(*convs)
645
+
646
+ self.stddev_group = 4
647
+ self.stddev_feat = 1
648
+
649
+ self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
650
+ self.final_linear = nn.Sequential(
651
+ EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
652
+ EqualLinear(channels[4], 1),
653
+ )
654
+
655
+ def forward(self, input):
656
+ out = self.convs(input)
657
+
658
+ batch, channel, height, width = out.shape
659
+ group = min(batch, self.stddev_group)
660
+ stddev = out.view(
661
+ group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
662
+ )
663
+ stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
664
+ stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
665
+ stddev = stddev.repeat(group, 1, height, width)
666
+ out = torch.cat([out, stddev], 1)
667
+
668
+ out = self.final_conv(out)
669
+
670
+ out = out.view(batch, -1)
671
+ out = self.final_linear(out)
672
+
673
+ return out
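A hedged sampling sketch for the Generator above (weights are random unless a checkpoint such as the one consumed by psp.py is loaded). Importing this module JIT-compiles the fused_act and upfirdn2d CUDA ops, so a GPU and the CUDA toolchain are assumed; the import path is also an assumption.

import torch
from models.stylegan2.model import Generator  # package path assumed

g = Generator(size=1024, style_dim=512, n_mlp=8).cuda().eval()
with torch.no_grad():
    z = torch.randn(2, 512, device='cuda')
    mean_w = g.mean_latent(4096)                       # average W used for truncation
    img, _ = g([z], truncation=0.7, truncation_latent=mean_w)
print(img.shape)  # torch.Size([2, 3, 1024, 1024]), roughly in [-1, 1] once trained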
Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .fused_act import FusedLeakyReLU, fused_leaky_relu
2
+ from .upfirdn2d import upfirdn2d
Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/fused_act.py ADDED
@@ -0,0 +1,85 @@
1
+ import os
2
+
3
+ import torch
4
+ from torch import nn
5
+ from torch.autograd import Function
6
+ from torch.utils.cpp_extension import load
7
+
8
+ module_path = os.path.dirname(__file__)
9
+ fused = load(
10
+ 'fused',
11
+ sources=[
12
+ os.path.join(module_path, 'fused_bias_act.cpp'),
13
+ os.path.join(module_path, 'fused_bias_act_kernel.cu'),
14
+ ],
15
+ )
16
+
17
+
18
+ class FusedLeakyReLUFunctionBackward(Function):
19
+ @staticmethod
20
+ def forward(ctx, grad_output, out, negative_slope, scale):
21
+ ctx.save_for_backward(out)
22
+ ctx.negative_slope = negative_slope
23
+ ctx.scale = scale
24
+
25
+ empty = grad_output.new_empty(0)
26
+
27
+ grad_input = fused.fused_bias_act(
28
+ grad_output, empty, out, 3, 1, negative_slope, scale
29
+ )
30
+
31
+ dim = [0]
32
+
33
+ if grad_input.ndim > 2:
34
+ dim += list(range(2, grad_input.ndim))
35
+
36
+ grad_bias = grad_input.sum(dim).detach()
37
+
38
+ return grad_input, grad_bias
39
+
40
+ @staticmethod
41
+ def backward(ctx, gradgrad_input, gradgrad_bias):
42
+ out, = ctx.saved_tensors
43
+ gradgrad_out = fused.fused_bias_act(
44
+ gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
45
+ )
46
+
47
+ return gradgrad_out, None, None, None
48
+
49
+
50
+ class FusedLeakyReLUFunction(Function):
51
+ @staticmethod
52
+ def forward(ctx, input, bias, negative_slope, scale):
53
+ empty = input.new_empty(0)
54
+ out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
55
+ ctx.save_for_backward(out)
56
+ ctx.negative_slope = negative_slope
57
+ ctx.scale = scale
58
+
59
+ return out
60
+
61
+ @staticmethod
62
+ def backward(ctx, grad_output):
63
+ out, = ctx.saved_tensors
64
+
65
+ grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
66
+ grad_output, out, ctx.negative_slope, ctx.scale
67
+ )
68
+
69
+ return grad_input, grad_bias, None, None
70
+
71
+
72
+ class FusedLeakyReLU(nn.Module):
73
+ def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
74
+ super().__init__()
75
+
76
+ self.bias = nn.Parameter(torch.zeros(channel))
77
+ self.negative_slope = negative_slope
78
+ self.scale = scale
79
+
80
+ def forward(self, input):
81
+ return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
82
+
83
+
84
+ def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
85
+ return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
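For reference, the forward pass of the CUDA op above amounts to a per-channel bias add, a leaky ReLU, and a rescale. A hedged, CPU-friendly pure-PyTorch restatement of that forward math (not part of the commit, and without the custom backward) is:

import torch
import torch.nn.functional as F

def fused_leaky_relu_reference(x, bias, negative_slope=0.2, scale=2 ** 0.5):
    # Broadcast the bias over dim 1 (channels), as the kernel's step_b indexing does.
    shape = [1, -1] + [1] * (x.ndim - 2)
    return F.leaky_relu(x + bias.view(*shape), negative_slope) * scale

out = fused_leaky_relu_reference(torch.randn(2, 64, 16, 16), torch.zeros(64))
print(out.shape)  # torch.Size([2, 64, 16, 16])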
Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/fused_bias_act.cpp ADDED
@@ -0,0 +1,21 @@
1
+ #include <torch/extension.h>
2
+
3
+
4
+ torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
5
+ int act, int grad, float alpha, float scale);
6
+
7
+ #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
8
+ #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
9
+ #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
10
+
11
+ torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
12
+ int act, int grad, float alpha, float scale) {
13
+ CHECK_CUDA(input);
14
+ CHECK_CUDA(bias);
15
+
16
+ return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
17
+ }
18
+
19
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
20
+ m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
21
+ }
Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/fused_bias_act_kernel.cu ADDED
@@ -0,0 +1,99 @@
1
+ // Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
2
+ //
3
+ // This work is made available under the Nvidia Source Code License-NC.
4
+ // To view a copy of this license, visit
5
+ // https://nvlabs.github.io/stylegan2/license.html
6
+
7
+ #include <torch/types.h>
8
+
9
+ #include <ATen/ATen.h>
10
+ #include <ATen/AccumulateType.h>
11
+ #include <ATen/cuda/CUDAContext.h>
12
+ #include <ATen/cuda/CUDAApplyUtils.cuh>
13
+
14
+ #include <cuda.h>
15
+ #include <cuda_runtime.h>
16
+
17
+
18
+ template <typename scalar_t>
19
+ static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,
20
+ int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {
21
+ int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;
22
+
23
+ scalar_t zero = 0.0;
24
+
25
+ for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {
26
+ scalar_t x = p_x[xi];
27
+
28
+ if (use_bias) {
29
+ x += p_b[(xi / step_b) % size_b];
30
+ }
31
+
32
+ scalar_t ref = use_ref ? p_ref[xi] : zero;
33
+
34
+ scalar_t y;
35
+
36
+ switch (act * 10 + grad) {
37
+ default:
38
+ case 10: y = x; break;
39
+ case 11: y = x; break;
40
+ case 12: y = 0.0; break;
41
+
42
+ case 30: y = (x > 0.0) ? x : x * alpha; break;
43
+ case 31: y = (ref > 0.0) ? x : x * alpha; break;
44
+ case 32: y = 0.0; break;
45
+ }
46
+
47
+ out[xi] = y * scale;
48
+ }
49
+ }
50
+
51
+
52
+ torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
53
+ int act, int grad, float alpha, float scale) {
54
+ int curDevice = -1;
55
+ cudaGetDevice(&curDevice);
56
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
57
+
58
+ auto x = input.contiguous();
59
+ auto b = bias.contiguous();
60
+ auto ref = refer.contiguous();
61
+
62
+ int use_bias = b.numel() ? 1 : 0;
63
+ int use_ref = ref.numel() ? 1 : 0;
64
+
65
+ int size_x = x.numel();
66
+ int size_b = b.numel();
67
+ int step_b = 1;
68
+
69
+ for (int i = 1 + 1; i < x.dim(); i++) {
70
+ step_b *= x.size(i);
71
+ }
72
+
73
+ int loop_x = 4;
74
+ int block_size = 4 * 32;
75
+ int grid_size = (size_x - 1) / (loop_x * block_size) + 1;
76
+
77
+ auto y = torch::empty_like(x);
78
+
79
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] {
80
+ fused_bias_act_kernel<scalar_t><<<grid_size, block_size, 0, stream>>>(
81
+ y.data_ptr<scalar_t>(),
82
+ x.data_ptr<scalar_t>(),
83
+ b.data_ptr<scalar_t>(),
84
+ ref.data_ptr<scalar_t>(),
85
+ act,
86
+ grad,
87
+ alpha,
88
+ scale,
89
+ loop_x,
90
+ size_x,
91
+ step_b,
92
+ size_b,
93
+ use_bias,
94
+ use_ref
95
+ );
96
+ });
97
+
98
+ return y;
99
+ }
Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/upfirdn2d.cpp ADDED
@@ -0,0 +1,23 @@
1
+ #include <torch/extension.h>
2
+
3
+
4
+ torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
5
+ int up_x, int up_y, int down_x, int down_y,
6
+ int pad_x0, int pad_x1, int pad_y0, int pad_y1);
7
+
8
+ #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
9
+ #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
10
+ #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
11
+
12
+ torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,
13
+ int up_x, int up_y, int down_x, int down_y,
14
+ int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
15
+ CHECK_CUDA(input);
16
+ CHECK_CUDA(kernel);
17
+
18
+ return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1);
19
+ }
20
+
21
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
22
+ m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)");
23
+ }
Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/upfirdn2d.py ADDED
@@ -0,0 +1,184 @@
1
+ import os
2
+
3
+ import torch
+ from torch.autograd import Function
+ from torch.nn import functional as F  # needed by upfirdn2d_native below
+ from torch.utils.cpp_extension import load
6
+
7
+ module_path = os.path.dirname(__file__)
8
+ upfirdn2d_op = load(
9
+ 'upfirdn2d',
10
+ sources=[
11
+ os.path.join(module_path, 'upfirdn2d.cpp'),
12
+ os.path.join(module_path, 'upfirdn2d_kernel.cu'),
13
+ ],
14
+ )
15
+
16
+
17
+ class UpFirDn2dBackward(Function):
18
+ @staticmethod
19
+ def forward(
20
+ ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
21
+ ):
22
+ up_x, up_y = up
23
+ down_x, down_y = down
24
+ g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
25
+
26
+ grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
27
+
28
+ grad_input = upfirdn2d_op.upfirdn2d(
29
+ grad_output,
30
+ grad_kernel,
31
+ down_x,
32
+ down_y,
33
+ up_x,
34
+ up_y,
35
+ g_pad_x0,
36
+ g_pad_x1,
37
+ g_pad_y0,
38
+ g_pad_y1,
39
+ )
40
+ grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
41
+
42
+ ctx.save_for_backward(kernel)
43
+
44
+ pad_x0, pad_x1, pad_y0, pad_y1 = pad
45
+
46
+ ctx.up_x = up_x
47
+ ctx.up_y = up_y
48
+ ctx.down_x = down_x
49
+ ctx.down_y = down_y
50
+ ctx.pad_x0 = pad_x0
51
+ ctx.pad_x1 = pad_x1
52
+ ctx.pad_y0 = pad_y0
53
+ ctx.pad_y1 = pad_y1
54
+ ctx.in_size = in_size
55
+ ctx.out_size = out_size
56
+
57
+ return grad_input
58
+
59
+ @staticmethod
60
+ def backward(ctx, gradgrad_input):
61
+ kernel, = ctx.saved_tensors
62
+
63
+ gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
64
+
65
+ gradgrad_out = upfirdn2d_op.upfirdn2d(
66
+ gradgrad_input,
67
+ kernel,
68
+ ctx.up_x,
69
+ ctx.up_y,
70
+ ctx.down_x,
71
+ ctx.down_y,
72
+ ctx.pad_x0,
73
+ ctx.pad_x1,
74
+ ctx.pad_y0,
75
+ ctx.pad_y1,
76
+ )
77
+ # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
78
+ gradgrad_out = gradgrad_out.view(
79
+ ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
80
+ )
81
+
82
+ return gradgrad_out, None, None, None, None, None, None, None, None
83
+
84
+
85
+ class UpFirDn2d(Function):
86
+ @staticmethod
87
+ def forward(ctx, input, kernel, up, down, pad):
88
+ up_x, up_y = up
89
+ down_x, down_y = down
90
+ pad_x0, pad_x1, pad_y0, pad_y1 = pad
91
+
92
+ kernel_h, kernel_w = kernel.shape
93
+ batch, channel, in_h, in_w = input.shape
94
+ ctx.in_size = input.shape
95
+
96
+ input = input.reshape(-1, in_h, in_w, 1)
97
+
98
+ ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
99
+
100
+ out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
101
+ out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
102
+ ctx.out_size = (out_h, out_w)
103
+
104
+ ctx.up = (up_x, up_y)
105
+ ctx.down = (down_x, down_y)
106
+ ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
107
+
108
+ g_pad_x0 = kernel_w - pad_x0 - 1
109
+ g_pad_y0 = kernel_h - pad_y0 - 1
110
+ g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
111
+ g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
112
+
113
+ ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
114
+
115
+ out = upfirdn2d_op.upfirdn2d(
116
+ input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
117
+ )
118
+ # out = out.view(major, out_h, out_w, minor)
119
+ out = out.view(-1, channel, out_h, out_w)
120
+
121
+ return out
122
+
123
+ @staticmethod
124
+ def backward(ctx, grad_output):
125
+ kernel, grad_kernel = ctx.saved_tensors
126
+
127
+ grad_input = UpFirDn2dBackward.apply(
128
+ grad_output,
129
+ kernel,
130
+ grad_kernel,
131
+ ctx.up,
132
+ ctx.down,
133
+ ctx.pad,
134
+ ctx.g_pad,
135
+ ctx.in_size,
136
+ ctx.out_size,
137
+ )
138
+
139
+ return grad_input, None, None, None, None
140
+
141
+
142
+ def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
143
+ out = UpFirDn2d.apply(
144
+ input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
145
+ )
146
+
147
+ return out
148
+
149
+
150
+ def upfirdn2d_native(
151
+ input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
152
+ ):
153
+ _, in_h, in_w, minor = input.shape
154
+ kernel_h, kernel_w = kernel.shape
155
+
156
+ out = input.view(-1, in_h, 1, in_w, 1, minor)
157
+ out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
158
+ out = out.view(-1, in_h * up_y, in_w * up_x, minor)
159
+
160
+ out = F.pad(
161
+ out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
162
+ )
163
+ out = out[
164
+ :,
165
+ max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0),
166
+ max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0),
167
+ :,
168
+ ]
169
+
170
+ out = out.permute(0, 3, 1, 2)
171
+ out = out.reshape(
172
+ [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
173
+ )
174
+ w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
175
+ out = F.conv2d(out, w)
176
+ out = out.reshape(
177
+ -1,
178
+ minor,
179
+ in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
180
+ in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
181
+ )
182
+ out = out.permute(0, 2, 3, 1)
183
+
184
+ return out[:, ::down_y, ::down_x, :]
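A hedged sketch of upfirdn2d_native above: it runs on CPU tensors and relies on the torch.nn.functional import at the top of this file, unlike the main upfirdn2d wrapper, which dispatches to the JIT-compiled CUDA kernel (importing the module still triggers the cpp_extension build). The pad values (2, 1) mirror what the Upsample module in model.py computes for a length-4 kernel with factor 2; the import path is assumed.

import torch
from models.stylegan2.op.upfirdn2d import upfirdn2d_native  # package path assumed

k = torch.tensor([1., 3., 3., 1.])
kernel = k[None, :] * k[:, None]
kernel /= kernel.sum()                      # same normalization as make_kernel in model.py

x = torch.randn(1, 32, 32, 1)               # (batch*channel, H, W, minor) layout used internally
out = upfirdn2d_native(x, kernel, up_x=2, up_y=2, down_x=1, down_y=1,
                       pad_x0=2, pad_x1=1, pad_y0=2, pad_y1=1)
print(out.shape)  # torch.Size([1, 64, 64, 1]): 2x blur-upsample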
Time_TravelRephotography/models/encoder4editing/models/stylegan2/op/upfirdn2d_kernel.cu ADDED
@@ -0,0 +1,272 @@
1
+ // Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
2
+ //
3
+ // This work is made available under the Nvidia Source Code License-NC.
4
+ // To view a copy of this license, visit
5
+ // https://nvlabs.github.io/stylegan2/license.html
6
+
7
+ #include <torch/types.h>
8
+
9
+ #include <ATen/ATen.h>
10
+ #include <ATen/AccumulateType.h>
11
+ #include <ATen/cuda/CUDAContext.h>
12
+ #include <ATen/cuda/CUDAApplyUtils.cuh>
13
+
14
+ #include <cuda.h>
15
+ #include <cuda_runtime.h>
16
+
17
+
18
+ static __host__ __device__ __forceinline__ int floor_div(int a, int b) {
19
+ int c = a / b;
20
+
21
+ if (c * b > a) {
22
+ c--;
23
+ }
24
+
25
+ return c;
26
+ }
27
+
28
+
29
+ struct UpFirDn2DKernelParams {
30
+ int up_x;
31
+ int up_y;
32
+ int down_x;
33
+ int down_y;
34
+ int pad_x0;
35
+ int pad_x1;
36
+ int pad_y0;
37
+ int pad_y1;
38
+
39
+ int major_dim;
40
+ int in_h;
41
+ int in_w;
42
+ int minor_dim;
43
+ int kernel_h;
44
+ int kernel_w;
45
+ int out_h;
46
+ int out_w;
47
+ int loop_major;
48
+ int loop_x;
49
+ };
50
+
51
+
52
+ template <typename scalar_t, int up_x, int up_y, int down_x, int down_y, int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
53
+ __global__ void upfirdn2d_kernel(scalar_t* out, const scalar_t* input, const scalar_t* kernel, const UpFirDn2DKernelParams p) {
+ const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
+ const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;
+
+ __shared__ volatile float sk[kernel_h][kernel_w];
+ __shared__ volatile float sx[tile_in_h][tile_in_w];
+
+ int minor_idx = blockIdx.x;
+ int tile_out_y = minor_idx / p.minor_dim;
+ minor_idx -= tile_out_y * p.minor_dim;
+ tile_out_y *= tile_out_h;
+ int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
+ int major_idx_base = blockIdx.z * p.loop_major;
+
+ if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | major_idx_base >= p.major_dim) {
+ return;
+ }
+
+ for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; tap_idx += blockDim.x) {
+ int ky = tap_idx / kernel_w;
+ int kx = tap_idx - ky * kernel_w;
+ scalar_t v = 0.0;
+
+ if (kx < p.kernel_w & ky < p.kernel_h) {
+ v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];
+ }
+
+ sk[ky][kx] = v;
+ }
+
+ for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major & major_idx < p.major_dim; loop_major++, major_idx++) {
+ for (int loop_x = 0, tile_out_x = tile_out_x_base; loop_x < p.loop_x & tile_out_x < p.out_w; loop_x++, tile_out_x += tile_out_w) {
+ int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;
+ int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;
+ int tile_in_x = floor_div(tile_mid_x, up_x);
+ int tile_in_y = floor_div(tile_mid_y, up_y);
+
+ __syncthreads();
+
+ for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; in_idx += blockDim.x) {
+ int rel_in_y = in_idx / tile_in_w;
+ int rel_in_x = in_idx - rel_in_y * tile_in_w;
+ int in_x = rel_in_x + tile_in_x;
+ int in_y = rel_in_y + tile_in_y;
+
+ scalar_t v = 0.0;
+
+ if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {
+ v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx];
+ }
+
+ sx[rel_in_y][rel_in_x] = v;
+ }
+
+ __syncthreads();
+ for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; out_idx += blockDim.x) {
+ int rel_out_y = out_idx / tile_out_w;
+ int rel_out_x = out_idx - rel_out_y * tile_out_w;
+ int out_x = rel_out_x + tile_out_x;
+ int out_y = rel_out_y + tile_out_y;
+
+ int mid_x = tile_mid_x + rel_out_x * down_x;
+ int mid_y = tile_mid_y + rel_out_y * down_y;
+ int in_x = floor_div(mid_x, up_x);
+ int in_y = floor_div(mid_y, up_y);
+ int rel_in_x = in_x - tile_in_x;
+ int rel_in_y = in_y - tile_in_y;
+ int kernel_x = (in_x + 1) * up_x - mid_x - 1;
+ int kernel_y = (in_y + 1) * up_y - mid_y - 1;
+
+ scalar_t v = 0.0;
+
+ #pragma unroll
+ for (int y = 0; y < kernel_h / up_y; y++)
+ #pragma unroll
+ for (int x = 0; x < kernel_w / up_x; x++)
+ v += sx[rel_in_y + y][rel_in_x + x] * sk[kernel_y + y * up_y][kernel_x + x * up_x];
+
+ if (out_x < p.out_w & out_y < p.out_h) {
+ out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v;
+ }
+ }
+ }
+ }
+ }
+
+
+ torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,
+ int up_x, int up_y, int down_x, int down_y,
+ int pad_x0, int pad_x1, int pad_y0, int pad_y1) {
+ int curDevice = -1;
+ cudaGetDevice(&curDevice);
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
+
+ UpFirDn2DKernelParams p;
+
+ auto x = input.contiguous();
+ auto k = kernel.contiguous();
+
+ p.major_dim = x.size(0);
+ p.in_h = x.size(1);
+ p.in_w = x.size(2);
+ p.minor_dim = x.size(3);
+ p.kernel_h = k.size(0);
+ p.kernel_w = k.size(1);
+ p.up_x = up_x;
+ p.up_y = up_y;
+ p.down_x = down_x;
+ p.down_y = down_y;
+ p.pad_x0 = pad_x0;
+ p.pad_x1 = pad_x1;
+ p.pad_y0 = pad_y0;
+ p.pad_y1 = pad_y1;
+
+ p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y;
+ p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x;
+
+ auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());
+
+ int mode = -1;
+
+ int tile_out_h;
+ int tile_out_w;
+
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) {
+ mode = 1;
+ tile_out_h = 16;
+ tile_out_w = 64;
+ }
+
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 3 && p.kernel_w <= 3) {
+ mode = 2;
+ tile_out_h = 16;
+ tile_out_w = 64;
+ }
+
+ if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) {
+ mode = 3;
+ tile_out_h = 16;
+ tile_out_w = 64;
+ }
+
+ if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 2 && p.kernel_w <= 2) {
+ mode = 4;
+ tile_out_h = 16;
+ tile_out_w = 64;
+ }
+
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 4 && p.kernel_w <= 4) {
+ mode = 5;
+ tile_out_h = 8;
+ tile_out_w = 32;
+ }
+
+ if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 2 && p.kernel_w <= 2) {
+ mode = 6;
+ tile_out_h = 8;
+ tile_out_w = 32;
+ }
+
+ dim3 block_size;
+ dim3 grid_size;
+
+ if (tile_out_h > 0 && tile_out_w) {
+ p.loop_major = (p.major_dim - 1) / 16384 + 1;
+ p.loop_x = 1;
+ block_size = dim3(32 * 8, 1, 1);
+ grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
+ (p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
+ (p.major_dim - 1) / p.loop_major + 1);
+ }
+
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
+ switch (mode) {
+ case 1:
+ upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>(
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+ );
+
+ break;
+
+ case 2:
+ upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64><<<grid_size, block_size, 0, stream>>>(
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+ );
+
+ break;
+
+ case 3:
+ upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>(
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+ );
+
+ break;
+
+ case 4:
+ upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64><<<grid_size, block_size, 0, stream>>>(
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+ );
+
+ break;
+
+ case 5:
+ upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, block_size, 0, stream>>>(
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+ );
+
+ break;
+
+ case 6:
+ upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, block_size, 0, stream>>>(
+ out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p
+ );
+
+ break;
+ }
+ });
+
+ return out;
+ }
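Note: upfirdn2d_op above fuses upsample, FIR filter, and downsample into one pass over a (major, in_h, in_w, minor) tensor; the buffer it allocates follows out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) / down_y, and likewise for out_w. For reference, below is a rough, unoptimized PyTorch sketch of the same semantics; the function name upfirdn2d_sketch, the symmetric up/down/pad arguments, and the non-negative-padding restriction are illustrative simplifications and are not part of this commit.

```python
# Reference sketch of upfirdn2d semantics (illustrative only, not part of this commit).
# Assumes equal x/y up and down factors and non-negative padding for brevity.
import torch
import torch.nn.functional as F


def upfirdn2d_sketch(x, kernel, up=1, down=1, pad=(0, 0)):
    """Upsample by zero insertion, zero-pad, convolve with a 2D FIR kernel, then downsample.

    x:      (batch, channels, in_h, in_w)
    kernel: (kernel_h, kernel_w)
    pad:    (pad0, pad1), applied to both spatial axes.
    """
    b, c, in_h, in_w = x.shape
    kh, kw = kernel.shape
    pad0, pad1 = pad

    # 1) Upsample: insert (up - 1) zeros after every input sample.
    out = x.reshape(b * c, 1, in_h, 1, in_w, 1)
    out = F.pad(out, [0, up - 1, 0, 0, 0, up - 1])
    out = out.reshape(b * c, 1, in_h * up, in_w * up)

    # 2) Zero-pad both spatial axes.
    out = F.pad(out, [pad0, pad1, pad0, pad1])

    # 3) True convolution: flip the kernel, mirroring how the CUDA code fills sk reversed.
    w = torch.flip(kernel, [0, 1]).reshape(1, 1, kh, kw).to(out.dtype)
    out = F.conv2d(out, w)

    # 4) Downsample: keep every `down`-th sample.
    out = out[:, :, ::down, ::down]
    return out.reshape(b, c, out.shape[2], out.shape[3])


# Example: 2x upsampling with a 3x3 box filter.
# in_h * up + pad0 + pad1 - kh + 1 = 16 + 2 - 3 + 1 = 16, so y has shape (1, 3, 16, 16).
x = torch.randn(1, 3, 8, 8)
k = torch.ones(3, 3) / 9.0
y = upfirdn2d_sketch(x, k, up=2, down=1, pad=(1, 1))
```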
Time_TravelRephotography/models/encoder4editing/options/__init__.py ADDED
File without changes
Time_TravelRephotography/models/encoder4editing/options/train_options.py ADDED
@@ -0,0 +1,84 @@
+ from argparse import ArgumentParser
+ from configs.paths_config import model_paths
+
+
+ class TrainOptions:
+
+ def __init__(self):
+ self.parser = ArgumentParser()
+ self.initialize()
+
+ def initialize(self):
+ self.parser.add_argument('--exp_dir', type=str, help='Path to experiment output directory')
+ self.parser.add_argument('--dataset_type', default='ffhq_encode', type=str,
+ help='Type of dataset/experiment to run')
+ self.parser.add_argument('--encoder_type', default='Encoder4Editing', type=str, help='Which encoder to use')
+
+ self.parser.add_argument('--batch_size', default=4, type=int, help='Batch size for training')
+ self.parser.add_argument('--test_batch_size', default=2, type=int, help='Batch size for testing and inference')
+ self.parser.add_argument('--workers', default=4, type=int, help='Number of train dataloader workers')
+ self.parser.add_argument('--test_workers', default=2, type=int,
+ help='Number of test/inference dataloader workers')
+
+ self.parser.add_argument('--learning_rate', default=0.0001, type=float, help='Optimizer learning rate')
+ self.parser.add_argument('--optim_name', default='ranger', type=str, help='Which optimizer to use')
+ self.parser.add_argument('--train_decoder', default=False, type=bool, help='Whether to train the decoder model')
+ self.parser.add_argument('--start_from_latent_avg', action='store_true',
+ help='Whether to add average latent vector to generate codes from encoder.')
+ self.parser.add_argument('--lpips_type', default='alex', type=str, help='LPIPS backbone')
+
+ self.parser.add_argument('--lpips_lambda', default=0.8, type=float, help='LPIPS loss multiplier factor')
+ self.parser.add_argument('--id_lambda', default=0.1, type=float, help='ID loss multiplier factor')
+ self.parser.add_argument('--l2_lambda', default=1.0, type=float, help='L2 loss multiplier factor')
+
+ self.parser.add_argument('--stylegan_weights', default=model_paths['stylegan_ffhq'], type=str,
+ help='Path to StyleGAN model weights')
+ self.parser.add_argument('--stylegan_size', default=1024, type=int,
+ help='size of pretrained StyleGAN Generator')
+ self.parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to pSp model checkpoint')
+
+ self.parser.add_argument('--max_steps', default=500000, type=int, help='Maximum number of training steps')
+ self.parser.add_argument('--image_interval', default=100, type=int,
+ help='Interval for logging train images during training')
+ self.parser.add_argument('--board_interval', default=50, type=int,
+ help='Interval for logging metrics to tensorboard')
+ self.parser.add_argument('--val_interval', default=1000, type=int, help='Validation interval')
+ self.parser.add_argument('--save_interval', default=None, type=int, help='Model checkpoint interval')
+
+ # Discriminator flags
+ self.parser.add_argument('--w_discriminator_lambda', default=0, type=float, help='Dw loss multiplier')
+ self.parser.add_argument('--w_discriminator_lr', default=2e-5, type=float, help='Dw learning rate')
+ self.parser.add_argument("--r1", type=float, default=10, help="weight of the r1 regularization")
+ self.parser.add_argument("--d_reg_every", type=int, default=16,
+ help="interval for applying r1 regularization")
+ self.parser.add_argument('--use_w_pool', action='store_true',
+ help='Whether to store a latent codes pool for the discriminator\'s training')
+ self.parser.add_argument("--w_pool_size", type=int, default=50,
+ help="W\'s pool size, depends on --use_w_pool")
+
+ # e4e specific
+ self.parser.add_argument('--delta_norm', type=int, default=2, help="norm type of the deltas")
+ self.parser.add_argument('--delta_norm_lambda', type=float, default=2e-4, help="lambda for delta norm loss")
+
+ # Progressive training
+ self.parser.add_argument('--progressive_steps', nargs='+', type=int, default=None,
+ help="The training steps of training new deltas. steps[i] starts the delta_i training")
+ self.parser.add_argument('--progressive_start', type=int, default=None,
+ help="The training step to start training the deltas, overrides progressive_steps")
+ self.parser.add_argument('--progressive_step_every', type=int, default=2_000,
+ help="Amount of training steps for each progressive step")
+
+ # Save additional training info to enable future training continuation from produced checkpoints
+ self.parser.add_argument('--save_training_data', action='store_true',
+ help='Save intermediate training data to resume training from the checkpoint')
+ self.parser.add_argument('--sub_exp_dir', default=None, type=str, help='Name of sub experiment directory')
+ self.parser.add_argument('--keep_optimizer', action='store_true',
+ help='Whether to continue from the checkpoint\'s optimizer')
+ self.parser.add_argument('--resume_training_from_ckpt', default=None, type=str,
+ help='Path to training checkpoint, works when --save_training_data was set to True')
+ self.parser.add_argument('--update_param_list', nargs='+', type=str, default=None,
+ help="Name of training parameters to update the loaded training checkpoint")
+
+ def parse(self):
+ opts = self.parser.parse_args()
+ return opts
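A minimal usage sketch for TrainOptions follows. This is a hypothetical entry-point script, not part of this commit, and it assumes the encoder4editing root is on sys.path so that the options and configs packages are importable.

```python
# Hypothetical entry point illustrating TrainOptions (not part of this commit).
# Example invocation:
#   python my_train.py --exp_dir experiments/e4e_ffhq --batch_size 8 --max_steps 200000
from options.train_options import TrainOptions

if __name__ == "__main__":
    opts = TrainOptions().parse()
    print(f"exp_dir={opts.exp_dir}, batch_size={opts.batch_size}, lr={opts.learning_rate}")
```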