dattarij committed on
Commit
8c212a5
1 Parent(s): c4be69a

adding ContraCLIP folder

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. ContraCLIP/.gitignore +21 -0
  2. ContraCLIP/README.md +178 -0
  3. ContraCLIP/calculate_jung_radii.py +210 -0
  4. ContraCLIP/checkpoint2model.py +51 -0
  5. ContraCLIP/download_models.py +168 -0
  6. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/02be4f3503db069a28be3bf222c0f64ae6f85d05/image_z.jpg +0 -0
  7. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/02be4f3503db069a28be3bf222c0f64ae6f85d05/latent_code_z.pt +3 -0
  8. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/10c29d1257e7c6e513d8ef23599ba6ba89eda181/image_z.jpg +0 -0
  9. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/10c29d1257e7c6e513d8ef23599ba6ba89eda181/latent_code_z.pt +3 -0
  10. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/323234c425e1b4fd5ec0539bb64765d72afffc75/image_z.jpg +0 -0
  11. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/323234c425e1b4fd5ec0539bb64765d72afffc75/latent_code_z.pt +3 -0
  12. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/555510a5999a3c5eb3097e0b80da4cee97088c8e/image_z.jpg +0 -0
  13. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/555510a5999a3c5eb3097e0b80da4cee97088c8e/latent_code_z.pt +3 -0
  14. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/911ea1a1d3b3e6b57a819ad9310048384608ce08/image_z.jpg +0 -0
  15. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/911ea1a1d3b3e6b57a819ad9310048384608ce08/latent_code_z.pt +3 -0
  16. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/9232b69c406fece5016ccfe260a226eaef1d9181/image_z.jpg +0 -0
  17. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/9232b69c406fece5016ccfe260a226eaef1d9181/latent_code_z.pt +3 -0
  18. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/c1345dab91e4c82070858e3201bcd7eac0bb042e/image_z.jpg +0 -0
  19. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/c1345dab91e4c82070858e3201bcd7eac0bb042e/latent_code_z.pt +3 -0
  20. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/ce26bd5026197c1df60bc43ab1a99f3db8730b0a/image_z.jpg +0 -0
  21. ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/ce26bd5026197c1df60bc43ab1a99f3db8730b0a/latent_code_z.pt +3 -0
  22. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/08307a8eacf4509f45ab65e8ee76dc53d089dec9/image_w.jpg +0 -0
  23. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/08307a8eacf4509f45ab65e8ee76dc53d089dec9/latent_code_w+.pt +3 -0
  24. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/08307a8eacf4509f45ab65e8ee76dc53d089dec9/latent_code_w.pt +3 -0
  25. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/08307a8eacf4509f45ab65e8ee76dc53d089dec9/latent_code_z.pt +3 -0
  26. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/28c1c29df1be16a26914078f57b2b95598496048/image_w.jpg +0 -0
  27. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/28c1c29df1be16a26914078f57b2b95598496048/latent_code_w+.pt +3 -0
  28. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/28c1c29df1be16a26914078f57b2b95598496048/latent_code_w.pt +3 -0
  29. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/28c1c29df1be16a26914078f57b2b95598496048/latent_code_z.pt +3 -0
  30. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/3ac589d77dc2845eda68b3e92b92f5aef972bd93/image_w.jpg +0 -0
  31. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/3ac589d77dc2845eda68b3e92b92f5aef972bd93/latent_code_w+.pt +3 -0
  32. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/3ac589d77dc2845eda68b3e92b92f5aef972bd93/latent_code_w.pt +3 -0
  33. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/3ac589d77dc2845eda68b3e92b92f5aef972bd93/latent_code_z.pt +3 -0
  34. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/565fd0382c69e4c9462179dbce46cab36b576226/image_w.jpg +0 -0
  35. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/565fd0382c69e4c9462179dbce46cab36b576226/latent_code_w+.pt +3 -0
  36. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/565fd0382c69e4c9462179dbce46cab36b576226/latent_code_w.pt +3 -0
  37. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/565fd0382c69e4c9462179dbce46cab36b576226/latent_code_z.pt +3 -0
  38. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/584c090fdba130d896e7b67f942df55f44baf022/image_w.jpg +0 -0
  39. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/584c090fdba130d896e7b67f942df55f44baf022/latent_code_w+.pt +3 -0
  40. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/584c090fdba130d896e7b67f942df55f44baf022/latent_code_w.pt +3 -0
  41. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/584c090fdba130d896e7b67f942df55f44baf022/latent_code_z.pt +3 -0
  42. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/60ace58591602b942ef7816000203c07479baf1e/image_w.jpg +0 -0
  43. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/60ace58591602b942ef7816000203c07479baf1e/latent_code_w+.pt +3 -0
  44. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/60ace58591602b942ef7816000203c07479baf1e/latent_code_w.pt +3 -0
  45. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/60ace58591602b942ef7816000203c07479baf1e/latent_code_z.pt +3 -0
  46. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/6e3a4bd20238f6964cb447efc2bf4f9ae889212f/image_w.jpg +0 -0
  47. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/6e3a4bd20238f6964cb447efc2bf4f9ae889212f/latent_code_w+.pt +3 -0
  48. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/6e3a4bd20238f6964cb447efc2bf4f9ae889212f/latent_code_w.pt +3 -0
  49. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/6e3a4bd20238f6964cb447efc2bf4f9ae889212f/latent_code_z.pt +3 -0
  50. ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/89577abc4b195d823ba8cf80e9405fc7bc822ebe/image_w.jpg +0 -0
ContraCLIP/.gitignore ADDED
@@ -0,0 +1,21 @@
+ .directory
+ */.directory
+ *~
+ .idea/
+ contra-clip-venv/
+ *.pyc
+ __pycache__/
+ */__pycache__/
+ dev/
+ notebooks/
+ figs/inkscape/
+ models/pretrained/
+
+ scripts/train/BACKUP/
+ scripts/eval/BACKUP/
+ scripts/compare/BACKUP/
+
+ !experiments/
+ experiments/*
+ experiments/latent_codes/TMP/
+ !experiments/latent_codes/
ContraCLIP/README.md ADDED
@@ -0,0 +1,178 @@
+ # ContraCLIP: Interpretable GAN generation driven by pairs of contrasting sentences
+
+ Authors' official PyTorch implementation of **[ContraCLIP: Interpretable GAN generation driven by pairs of contrasting sentences](https://arxiv.org/pdf/2206.02104.pdf)**. If you use this code for your research, please [**cite**](#citation) our paper.
+
+ > **ContraCLIP: Interpretable GAN generation driven by pairs of contrasting sentences**<br>
+ > Christos Tzelepis, James Oldfield, Georgios Tzimiropoulos, and Ioannis Patras<br>
+ > https://arxiv.org/abs/2206.02104 <br>
+ > ![ContraCLIP Summary](figs/summary.png)
+ >
+ > **Abstract**: This work addresses the problem of discovering non-linear interpretable paths in the latent space of pre-trained GANs in a model-agnostic manner. In the proposed method, the discovery is driven by a set of pairs of natural language sentences with contrasting semantics, named semantic dipoles, that serve as the limits of the interpretation that we require by the trainable latent paths to encode. By using the pre-trained CLIP encoder, the sentences are projected into the vision-language space, where they serve as dipoles, and where RBF-based warping functions define a set of non-linear directional paths, one for each semantic dipole, allowing in this way traversals from one semantic pole to the other. By defining an objective that discovers paths in the latent space of GANs that generate changes along the desired paths in the vision-language embedding space, we provide an intuitive way of controlling the underlying generating factors and address some of the limitations of the state-of-the-art works, namely, that a) they are typically tailored to specific GAN architectures (i.e., StyleGAN), b) they disregard the relative position of the manipulated and the original image in the image embedding and the relative position of the image and the text embeddings, and c) they lead to abrupt image manipulations and quickly arrive at regions of low density and, thus, low image quality, providing limited control of the generative factors.
+
+
+ | Semantic Dipole (i.e., contrasting sentences given in natural language) | Example |
+ | ------------------------------------------------------------ | :----------------------------------------------------------: |
+ | *"a picture of an **angry shaved man**." &#8594; "a picture of a **man** with a **beard crying**."* <br>[StyleGAN2@FFHQ] | <img src="figs/examples/stylegan2ffhq_angryshaved2beardcrying.gif" width="500"/> |
+ | *"a picture of a person with **open eyes**." &#8594; "a picture of a person with **closed eyes**."* <br>[StyleGAN2@FFHQ] | <img src="figs/examples/stylegan2ffhq_eyes.gif" width="500"/> |
+ | *"a picture of a **young person**." &#8594; "a picture of an **old person**."* <br>[StyleGAN2@FFHQ] | <img src="figs/examples/stylegan2ffhq_young2old.gif" width="500"/> |
+ | *"a picture of a **man** with **hair**." &#8594; "a picture of a **bald man**."* <br>[ProgGAN@CelebA-HQ] | <img src="figs/examples/pggancelebahq_hair2bald.gif" width="500"/> |
+ | *"a picture of a person with **happy** face." &#8594; "a picture of a person with **surprised** face."* <br>[ProgGAN@CelebA-HQ] | <img src="figs/examples/pggancelebahq_happy2surprised.gif" width="500"/> |
+ | *"a picture of a **face without makeup**." &#8594; "a picture of a **face with makeup**."* <br>[ProgGAN@CelebA-HQ] | <img src="figs/examples/pggancelebahq_makeup.gif" width="500"/> |
+ | *"a picture of an **ugly cat**." &#8594; "a picture of a **cute cat**."* <br>[StyleGAN2@AFHQ-Cats] | <img src="figs/examples/stylegan2afhqcats_ugly2cute.gif" width="500"/> |
+ | *"a picture of a **dog** with **small eyes**." &#8594; "a picture of a **dog** with **big eyes**."* <br>[StyleGAN2@AFHQ-Dogs] | <img src="figs/examples/stylegan2afhqdogs_smalleyes2bigeyes.gif" width="500"/> |
+
+
+
+ ## Overview
+
+ ![ContraCLIP Overview](./figs/overview.svg)
+ <p align="center">
+ The CLIP text space, warped due to semantic dipoles of contrasting pairs of sentences in natural language, provides supervision to the optimisation of non-linear interpretable paths in the latent space of a pre-trained GAN.
+ </p>
+
+
+ ## Installation
+
+ We recommend installing the required packages using Python's native virtual environment as follows:
+
+ ```bash
+ $ python -m venv contra-clip-venv
+ $ source contra-clip-venv/bin/activate
+ (contra-clip-venv) $ pip install --upgrade pip
+ (contra-clip-venv) $ pip install -r requirements.txt
+ (contra-clip-venv) $ pip install git+https://github.com/openai/CLIP.git
+ (contra-clip-venv) $ pip install --pre torch torchvision --extra-index-url https://download.pytorch.org/whl/nightly/cu113
+ ```
+
+ For using the aforementioned virtual environment in a Jupyter Notebook, you need to manually add the kernel as follows:
+
+ ```bash
+ (contra-clip-venv) $ python -m ipykernel install --user --name=contra-clip-venv
+ ```
+
+
+
+ ## Prerequisite pre-trained models and pre-trained ContraCLIP models
+
+ Download the prerequisite pre-trained models (GAN generators and various pre-trained detectors, such as ArcFace, FairFace, etc.), as well as (optionally) the pre-trained ContraCLIP models (by passing `-m` or `--contraclip-models`) as follows:
+
+ ```bash
+ (contra-clip-venv) $ python download.py -m
+ ```
+
+ This will create a directory `models/pretrained` with the following sub-directories (~3.3 GiB):
+ ```
+ ./models/pretrained/
+ ├── genforce
+ │   ├── pggan_car256.pth
+ │   ├── pggan_celebahq1024.pth
+ │   ├── pggan_church256.pth
+ │   ├── stylegan2_afhqcat512.pth
+ │   ├── stylegan2_afhqdog512.pth
+ │   ├── stylegan2_car512.pth
+ │   ├── stylegan2_church256.pth
+ │   └── stylegan2_ffhq1024.pth
+ ├── arcface
+ │   └── model_ir_se50.pth
+ ├── au_detector
+ │   └── disfa_adaptation_f0.pth
+ ├── celeba_attributes
+ │   └── eval_predictor.pth.tar
+ ├── fairface
+ │   ├── fairface_alldata_4race_20191111.pt
+ │   └── res34_fair_align_multi_7_20190809.pt
+ ├── hopenet
+ │   ├── hopenet_alpha1.pkl
+ │   ├── hopenet_alpha2.pkl
+ │   └── hopenet_robust_alpha1.pkl
+ └── sfd
+     └── s3fd-619a316812.pth
+ ```
+
+ as well as a directory `experiments/complete/` (if not already created by the user upon an experiment's completion) for downloading the ContraCLIP pre-trained models with the following sub-directories (~160 MiB):
+
+ ```
+ ./experiments/complete/
+ ├── ContraCLIP_pggan_celebahq1024-Z-K9-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-contrastive_0.07-20000-attributes
+ ├── ContraCLIP_pggan_celebahq1024-Z-K9-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-cossim-20000-attributes
+ ├── ContraCLIP_stylegan2_afhqcat512-W+-K3-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-contrastive_0.07-20000-cats
+ ├── ContraCLIP_stylegan2_afhqdog512-W+-K4-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-contrastive_0.07-20000-dogs
+ ├── ContraCLIP_stylegan2_car512-W+-K3-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-contrastive_0.07-20000-cars
+ ├── ContraCLIP_stylegan2_ffhq1024-W+-K21-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-contrastive_0.07-20000-expressions
+ ├── ContraCLIP_stylegan2_ffhq1024-W+-K21-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-cossim-20000-expressions
+ ├── ContraCLIP_stylegan2_ffhq1024-W+-K3-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-contrastive_0.07-20000-complex
+ ├── ContraCLIP_stylegan2_ffhq1024-W+-K3-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-contrastive_0.07-20000-expressions3
+ ├── ContraCLIP_stylegan2_ffhq1024-W+-K3-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-cossim-20000-complex
+ ├── ContraCLIP_stylegan2_ffhq1024-W+-K3-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-cossim-20000-expressions3
+ ├── ContraCLIP_stylegan2_ffhq1024-W+-K9-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-contrastive_0.07-20000-attributes
+ └── ContraCLIP_stylegan2_ffhq1024-W+-K9-D64-lss_beta_0.5-eps0.1_0.2-nonlinear_css_beta_0.5-cossim-20000-attributes
+ ```
+
+ We note that the pre-trained detectors (such as ArcFace) are used only during the evaluation stage (**no ID preserving loss is imposed during training**).
+
+
+
+ ## Training
+
+ For training a ContraCLIP model you need to use `train.py` (check its basic usage by running `python train.py -h`). For example, in order to train a ContraCLIP model for the corpus of contrasting sentences called "expressions3" (defined in `lib/config.py`) on the StyleGAN2 generator pre-trained on FFHQ (in its `W` latent space with a truncation parameter equal to `0.7`), you can run the following command:
+
+ ```bash
+ (contra-clip-venv) $ python train.py --gan=stylegan2_ffhq1024 --truncation=0.7 --stylegan-space=W --corpus=expressions3 --num-latent-support-dipoles=128 --loss=contrastive --temperature=0.5 --beta=0.75 --min-shift-magnitude=0.1 --max-shift-magnitude=0.2 --batch-size=3 --max-iter=120000 --log-freq=10 --ckp-freq=100
+ ```
+
+ In the example above, the batch size is set to `3` and the training will be conducted for `120000` iterations. Minimum and maximum shift magnitudes are set to `0.1` and `0.2`, respectively, and the number of support dipoles for each latent path is set to `128` (please see [WarpedGANSpace](https://github.com/chi0tzp/WarpedGANSpace) for more details). Moreover, the `contrastive` loss is used with a temperature parameter equal to `0.5`. The `beta` parameter of the CLIP text space RBF dipoles is set to `0.75`. A set of auxiliary training scripts (for the results reported in the paper) can be found under `scripts/train/`.
+
+ The training script will create a directory with the following name format:
+
+ ```
+ ContraCLIP_<gan_type>-<latent_space>-K<num_of_paths>-D<num_latent_support_sets>-eps<min_shift_magnitude>_<max_shift_magnitude>-<linear|nonlinear>_beta-<beta>-contrastive_<temperature>-<corpus>
+ ```
+
+ For instance, `ContraCLIP_stylegan2_ffhq1024-W-K3-D128-eps0.1_0.2-nonlinear_beta-0.75-contrastive_0.5-expressions3`, under `experiments/wip/` while training is in progress, which, after training completion, will be copied under `experiments/complete/`. This directory has the following structure:
+
+ ```
+ ├── models/
+ ├── args.json
+ ├── stats.json
+ └── command.sh
+ ```
+
+ where `models/` contains the weights for the latent support sets (`latent_support_sets.pt`). While training is in progress (i.e., while this directory is found under `experiments/wip/`), the corresponding `models/` directory contains a checkpoint file (`checkpoint.pt`) containing the last iteration and the weights for the latent support sets, so as to resume training. Re-run the same command, and if the last iteration is less than the given maximum number of iterations, training will resume from the last iteration. This directory will be referred to as `EXP_DIR` for the rest of this document.
+
+
+
+ ## Evaluation
+
+ As soon as a *ContraCLIP* model is trained, the corresponding experiment's directory (i.e., `EXP_DIR`) can be found under `experiments/complete/`. In order to evaluate the model, we can generate image sequences across the discovered latent paths (for the given pairs of contrasting sentences). For doing so, we need to create a pool of latent codes/images for the corresponding GAN type. This can be done using `sample_gan.py`. The pool of latent codes/images will be stored under `experiments/latent_codes/<gan_type>/`. We will be referring to it as `POOL` for the rest of this document.
+
+ For example, the following command will create a pool named `stylegan2_ffhq1024-4` under `experiments/latent_codes/stylegan2_ffhq1024/`:
+
+ ```bash
+ (contra-clip-venv) $ python sample_gan.py -v --gan-type=stylegan2_ffhq1024 --stylegan-space=W --truncation=0.7 --num-samples=4
+ ```
+
+ Latent space traversals can then be calculated using the script `traverse_latent_space.py` (please check its basic usage by running `traverse_latent_space.py -h`) for a given model and a given `POOL`. Upon completion, results (i.e., latent traversals) will be stored under the following directory:
+
+ `experiments/complete/EXP_DIR/results/POOL/<2*shift_steps>_<eps>_<total_length>`,
+
+ where `eps`, `shift_steps`, and `total_length` denote respectively the shift magnitude (of a single step on the path), the number of such steps, and the total traversal length. A set of auxiliary evaluation scripts (for the results reported in the paper) can be found under `scripts/eval/`.
+
+
+
+ ## Citation
+
+ ```bibtex
+ @misc{tzelepis2022contraclip,
+     author = {Tzelepis, Christos and Oldfield, James and Tzimiropoulos, Georgios and Patras, Ioannis},
+     title = {{ContraCLIP}: Interpretable {GAN} generation driven by pairs of contrasting sentences},
+     year = {2022},
+     eprint = {2206.02104},
+     archivePrefix = {arXiv},
+     primaryClass = {cs.CV}
+ }
+ ```
+
+
+
+ <!--Acknowledgement: This research was supported by the EU's Horizon 2020 programme H2020-951911 [AI4Media](https://www.ai4media.eu/) project.-->
+
ContraCLIP/calculate_jung_radii.py ADDED
@@ -0,0 +1,210 @@
+ import argparse
+ import numpy as np
+ import os.path as osp
+ import torch
+ from lib import GENFORCE_MODELS
+ from models.load_generator import load_generator
+ from sklearn import linear_model
+ from collections import defaultdict
+ from tqdm import tqdm
+ import json
+
+
+ def make_dict():
+     return defaultdict(make_dict)
+
+
+ def main():
+     """A script for calculating the radii of minimal enclosing balls for the latent space of a GAN (i.e., in Z/W/W+ space),
+     given a truncation parameter. When applicable, a linear model is trained in order to predict the radii of the latent
+     codes, given a truncation parameter.
+
+     The parameters of the linear model (i.e., the weight w and the bias b) are stored for each GAN type and each latent
+     space in a json file (i.e., models/jung_radii.json) as a dictionary with the following format:
+         {
+             ...
+             <gan>:
+                 {
+                     'Z': (<w>, <b>),
+                     'W':
+                         {
+                             ...
+                             <stylegan-layer>: (<w>, <b>),
+                             ...
+                         },
+                 },
+             ...
+         }
+     so that, given a truncation parameter t, the radius is given as `w * t + b`.
+
+     Options:
+         -v, --verbose : set verbose mode on
+         --num-samples : set the number of latent codes to sample for generating images
+         --cuda        : use CUDA (default)
+         --no-cuda     : do not use CUDA
+     """
+     parser = argparse.ArgumentParser(description="Fit a linear model for the jung radius of GAN's latent code given "
+                                                  "a truncation parameter")
+     parser.add_argument('-v', '--verbose', action='store_true', help="verbose mode on")
+     parser.add_argument('--num-samples', type=int, default=1000, help="set number of latent codes to sample")
+     parser.add_argument('--cuda', dest='cuda', action='store_true', help="use CUDA during training")
+     parser.add_argument('--no-cuda', dest='cuda', action='store_false', help="do NOT use CUDA during training")
+     parser.set_defaults(cuda=True)
+     # ================================================================================================================ #
+
+     # Parse given arguments
+     args = parser.parse_args()
+
+     # CUDA
+     use_cuda = False
+     if torch.cuda.is_available():
+         if args.cuda:
+             use_cuda = True
+             torch.set_default_tensor_type('torch.cuda.FloatTensor')
+         else:
+             print("*** WARNING ***: It looks like you have a CUDA device, but aren't using CUDA.\n"
+                   " Run with --cuda for optimal training speed.")
+             torch.set_default_tensor_type('torch.FloatTensor')
+     else:
+         torch.set_default_tensor_type('torch.FloatTensor')
+
+     # Build jung radii dictionary and populate it
+     nested_dict = lambda: defaultdict(nested_dict)
+     jung_radii_dict = nested_dict()
+     for gan in GENFORCE_MODELS.keys():
+         ################################################################################################################
+         ##                                                                                                            ##
+         ##                                              [ StyleGANs ]                                                 ##
+         ##                                                                                                            ##
+         ################################################################################################################
+         if 'stylegan' in gan:
+             ############################################################################################################
+             ##                                                                                                        ##
+             ##                                        [ StyleGAN / Z-space ]                                          ##
+             ##                                                                                                        ##
+             ############################################################################################################
+             # Build GAN generator model and load with pre-trained weights
+             if args.verbose:
+                 print(" \\__Build GAN generator model G and load with pre-trained weights...")
+                 print(" \\__GAN generator : {} (res: {})".format(gan, GENFORCE_MODELS[gan][1]))
+                 print(" \\__Pre-trained weights: {}".format(GENFORCE_MODELS[gan][0]))
+
+             G = load_generator(model_name=gan, latent_is_w=False, verbose=args.verbose).eval()
+
+             # Upload GAN generator model to GPU
+             if use_cuda:
+                 G = G.cuda()
+
+             # Latent codes sampling
+             if args.verbose:
+                 print(" \\__Sample {} {}-dimensional latent codes...".format(args.num_samples, G.dim_z))
+             zs = torch.randn(args.num_samples, G.dim_z)
+
+             if use_cuda:
+                 zs = zs.cuda()
+
+             # Calculate expected latent norm
+             if args.verbose:
+                 print(" \\__Calculate Jung radius...")
+             jung_radius = torch.cdist(zs, zs).max() * np.sqrt(G.dim_z / (2 * (G.dim_z + 1)))
+             jung_radii_dict[gan]['Z'] = (0.0, jung_radius.cpu().detach().item())
+
+             ############################################################################################################
+             ##                                                                                                        ##
+             ##                                      [ StyleGAN / W/W+-space ]                                         ##
+             ##                                                                                                        ##
+             ############################################################################################################
+             # Build GAN generator model and load with pre-trained weights
+             if args.verbose:
+                 print(" \\__Build GAN generator model G and load with pre-trained weights...")
+                 print(" \\__GAN generator : {} (res: {})".format(gan, GENFORCE_MODELS[gan][1]))
+                 print(" \\__Pre-trained weights: {}".format(GENFORCE_MODELS[gan][0]))
+
+             G = load_generator(model_name=gan, latent_is_w=True, verbose=args.verbose).eval()
+
+             # Upload GAN generator model to GPU
+             if use_cuda:
+                 G = G.cuda()
+
+             # Latent codes sampling
+             if args.verbose:
+                 print(" \\__Sample {} {}-dimensional latent codes...".format(args.num_samples, G.dim_z))
+             zs = torch.randn(args.num_samples, G.dim_z)
+
+             if use_cuda:
+                 zs = zs.cuda()
+
+             # Get number of W layers for the given StyleGAN
+             stylegan_num_layers = G.get_w(zs, truncation=1.0).shape[1]
+
+             # Calculate expected latent norm and fit a linear model for each version of the W+ space
+             if args.verbose:
+                 print(" \\__Calculate Jung radii and fit linear models...")
+             data_per_layer = dict()
+             tmp = []
+             for truncation in tqdm(np.linspace(0.1, 1.0, 100), desc=" \\__Calculate radii (W space): "):
+                 ws = G.get_w(zs, truncation=truncation)[:, 0, :]
+                 jung_radius = torch.cdist(ws, ws).max() * np.sqrt(ws.shape[1] / (2 * (ws.shape[1] + 1)))
+                 tmp.append([truncation, jung_radius.cpu().detach().item()])
+             data_per_layer.update({0: tmp})
+
+             for ll in tqdm(range(1, stylegan_num_layers), desc=" \\__Calculate radii (W+ space): "):
+                 tmp = []
+                 for truncation in np.linspace(0.1, 1.0, 100):
+                     ws_plus = G.get_w(zs, truncation=truncation)[:, :ll + 1, :]
+                     ws_plus = ws_plus.reshape(ws_plus.shape[0], -1)
+                     jung_radius = torch.cdist(ws_plus, ws_plus).max() * \
+                         np.sqrt(ws_plus.shape[1] / (2 * (ws_plus.shape[1] + 1)))
+                     tmp.append([truncation, jung_radius.cpu().detach().item()])
+                 data_per_layer.update({ll: tmp})
+
+             for ll, v in tqdm(data_per_layer.items(), desc=" \\__Fit linear models"):
+                 v = np.array(v)
+                 lm = linear_model.LinearRegression()
+                 lm.fit(v[:, 0].reshape(-1, 1), v[:, 1].reshape(-1, 1))
+                 jung_radii_dict[gan]['W'][ll] = (float(lm.coef_[0, 0]), float(lm.intercept_[0]))
+
+         ################################################################################################################
+         ##                                                                                                            ##
+         ##                                               [ ProgGAN ]                                                  ##
+         ##                                                                                                            ##
+         ################################################################################################################
+         else:
+             # Build GAN generator model and load with pre-trained weights
+             if args.verbose:
+                 print(" \\__Build GAN generator model G and load with pre-trained weights...")
+                 print(" \\__GAN generator : {} (res: {})".format(gan, GENFORCE_MODELS[gan][1]))
+                 print(" \\__Pre-trained weights: {}".format(GENFORCE_MODELS[gan][0]))
+
+             G = load_generator(model_name=gan, latent_is_w=False, verbose=args.verbose).eval()
+
+             # Upload GAN generator model to GPU
+             if use_cuda:
+                 G = G.cuda()
+
+             # Latent codes sampling
+             if args.verbose:
+                 print(" \\__Sample {} {}-dimensional latent codes...".format(args.num_samples, G.dim_z))
+             zs = torch.randn(args.num_samples, G.dim_z)
+
+             if use_cuda:
+                 zs = zs.cuda()
+
+             # Calculate expected latent norm
+             if args.verbose:
+                 print(" \\__Calculate Jung radius...")
+             jung_radius = torch.cdist(zs, zs).max() * np.sqrt(G.dim_z / (2 * (G.dim_z + 1)))
+
+             print("jung_radius")
+             print(jung_radius)
+             print(type(jung_radius))
+
+             jung_radii_dict[gan]['Z'] = (0.0, jung_radius.cpu().detach().item())
+
+     # Save expected latent norms dictionary
+     with open(osp.join('models', 'jung_radii.json'), 'w') as fp:
+         json.dump(jung_radii_dict, fp)
+
+
+ if __name__ == '__main__':
+     main()
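The Jung radius computed above follows Jung's theorem, which bounds the radius of the minimal enclosing ball of a point set with diameter D in an n-dimensional space by D * sqrt(n / (2 * (n + 1))); for each GAN and latent space the script fits a linear model so that the radius for a truncation parameter t is approximated as w * t + b, with the (w, b) pairs stored in `models/jung_radii.json`. A minimal sketch of how those stored coefficients might be consumed (illustrative only, not part of the commit; it only assumes the dictionary format documented in the docstring above and the fact that `json.dump` serialises integer keys as strings):

```python
import json

# Load the (w, b) coefficients written by calculate_jung_radii.py.
with open('models/jung_radii.json') as fp:
    jung_radii = json.load(fp)

t = 0.7  # example truncation parameter

# Z space: stored as (0.0, b), i.e., a constant radius independent of t.
w, b = jung_radii['stylegan2_ffhq1024']['Z']
print('Z-space Jung radius:', w * t + b)

# W/W+ space: one (w, b) pair per number of StyleGAN layers; note that
# the integer layer keys become strings after the JSON round-trip.
w, b = jung_radii['stylegan2_ffhq1024']['W']['0']
print('W-space Jung radius:', w * t + b)
```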
ContraCLIP/checkpoint2model.py ADDED
@@ -0,0 +1,51 @@
+ import argparse
+ import os.path as osp
+ import torch
+
+
+ def main():
+     """An auxiliary script for converting a checkpoint file (`checkpoint.pt`) into support sets (`support_sets.pt`)
+     and reconstructor (`reconstructor.pt`) weights files.
+
+     Options:
+         ================================================================================================================
+         --exp : set experiment's wip model dir, as created by `train.py`, i.e., it should contain a sub-directory
+                 `models/` with a checkpoint file (`checkpoint.pt`). The checkpoint file contains the weights of the
+                 support sets and the reconstructor at an intermediate stage of training (after a given iteration).
+         ================================================================================================================
+     """
+     parser = argparse.ArgumentParser(description="Convert a checkpoint file into a support sets and a reconstructor "
+                                                  "weights files")
+     parser.add_argument('--exp', type=str, required=True, help="set experiment's model dir (created by `train.py`)")
+
+     # Parse given arguments
+     args = parser.parse_args()
+
+     # Check structure of `args.exp`
+     if not osp.isdir(args.exp):
+         raise NotADirectoryError("Invalid given directory: {}".format(args.exp))
+     models_dir = osp.join(args.exp, 'models')
+     if not osp.isdir(models_dir):
+         raise NotADirectoryError("Invalid models directory: {}".format(models_dir))
+     checkpoint_file = osp.join(models_dir, 'checkpoint.pt')
+     if not osp.isfile(checkpoint_file):
+         raise FileNotFoundError("Checkpoint file not found: {}".format(checkpoint_file))
+
+     print("#. Convert checkpoint file into support sets and reconstructor weight files...")
+
+     # Load checkpoint file
+     checkpoint_dict = torch.load(checkpoint_file)
+     print(" \\__Checkpoint dictionary: {}".format(checkpoint_dict.keys()))
+
+     # Get checkpoint iteration
+     checkpoint_iter = checkpoint_dict['iter']
+     print(" \\__Checkpoint iteration: {}".format(checkpoint_iter))
+
+     # Save latent support sets (LSS) weights file
+     print(" \\__Save checkpoint latent support sets LSS weights file...")
+     torch.save(checkpoint_dict['latent_support_sets'],
+                osp.join(models_dir, 'latent_support_sets-{:07d}.pt'.format(checkpoint_iter)))
+
+
+ if __name__ == '__main__':
+     main()
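For reference, given a work-in-progress experiment directory (the `EXP_DIR` naming used in the README), the conversion is invoked via the `--exp` argument defined above, e.g.:

```bash
python checkpoint2model.py --exp=experiments/wip/EXP_DIR
```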
ContraCLIP/download_models.py ADDED
@@ -0,0 +1,168 @@
+ import sys
+ import os
+ import os.path as osp
+ import argparse
+ import hashlib
+ import tarfile
+ import time
+ import urllib.request
+ from lib import GENFORCE, GENFORCE_MODELS, SFD, ARCFACE, FAIRFACE, HOPENET, AUDET, CELEBA_ATTRIBUTES, ContraCLIP_models
+
+
+ def reporthook(count, block_size, total_size):
+     global start_time
+     if count == 0:
+         start_time = time.time()
+         return
+     duration = time.time() - start_time
+     progress_size = int(count * block_size)
+     speed = int(progress_size / (1024 * duration))
+     percent = min(int(count * block_size * 100 / total_size), 100)
+     sys.stdout.write("\r \\__%d%%, %d MB, %d KB/s, %d seconds passed" %
+                      (percent, progress_size / (1024 * 1024), speed, duration))
+
+     sys.stdout.flush()
+
+
+ def download(src, sha256sum, dest):
+     tmp_tar = osp.join(dest, ".tmp.tar")
+     try:
+         urllib.request.urlretrieve(src, tmp_tar, reporthook)
+     except:
+         raise ConnectionError("Error: {}".format(src))
+
+     sha256_hash = hashlib.sha256()
+     with open(tmp_tar, "rb") as f:
+         # Read and update hash string value in blocks of 4K
+         for byte_block in iter(lambda: f.read(4096), b""):
+             sha256_hash.update(byte_block)
+
+     sha256_check = sha256_hash.hexdigest() == sha256sum
+     print()
+     print(" \\__Check sha256: {}".format("OK!" if sha256_check else "Error"))
+     if not sha256_check:
+         raise Exception("Error: Invalid sha256 sum: {}".format(sha256_hash.hexdigest()))
+
+     tar_file = tarfile.open(tmp_tar, mode='r')
+     tar_file.extractall(dest)
+     os.remove(tmp_tar)
+
+
+ def main():
+     """Download pre-trained GAN generators and various pre-trained detectors (used only during testing), as well as
+     pre-trained ContraCLIP models:
+         -- GenForce GAN generators [1]
+         -- SFD face detector [2]
+         -- ArcFace [3]
+         -- FairFace [4]
+         -- Hopenet [5]
+         -- AU detector [6] for 12 DISFA [7] Action Units
+         -- Facial attributes detector [8] for 5 CelebA [9] attributes
+         -- ContraCLIP [10] pre-trained models:
+             StyleGAN2@FFHQ
+             ProgGAN@CelebA-HQ
+             StyleGAN2@AFHQ-Cats
+             StyleGAN2@AFHQ-Dogs
+             StyleGAN2@AFHQ-Cars
+
+     References:
+         [1] https://genforce.github.io/
+         [2] Zhang, Shifeng, et al. "S3FD: Single shot scale-invariant face detector." Proceedings of the IEEE
+             international conference on computer vision. 2017.
+         [3] Deng, Jiankang, et al. "Arcface: Additive angular margin loss for deep face recognition."
+             Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2019.
+         [4] Karkkainen, Kimmo, and Jungseock Joo. "FairFace: Face attribute dataset for balanced race, gender, and age."
+             arXiv preprint arXiv:1908.04913 (2019).
+         [5] Doosti, Bardia, et al. "Hope-net: A graph-based model for hand-object pose estimation." Proceedings of the
+             IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2020.
+         [6] Ntinou, Ioanna, et al. "A transfer learning approach to heatmap regression for action unit intensity
+             estimation." IEEE Transactions on Affective Computing (2021).
+         [7] Mavadati, S. Mohammad, et al. "DISFA: A spontaneous facial action intensity database." IEEE Transactions on
+             Affective Computing 4.2 (2013): 151-160.
+         [8] Jiang, Yuming, et al. "Talk-to-Edit: Fine-Grained Facial Editing via Dialog." Proceedings of the IEEE/CVF
+             International Conference on Computer Vision. 2021.
+         [9] Liu, Ziwei, et al. "Deep learning face attributes in the wild." Proceedings of the IEEE international
+             conference on computer vision. 2015.
+         [10] Tzelepis, C., Oldfield, J., Tzimiropoulos, G., & Patras, I. (2022). ContraCLIP: Interpretable GAN
+             generation driven by pairs of contrasting sentences. arXiv preprint arXiv:2206.02104.
+     """
+     parser = argparse.ArgumentParser(description="Download pre-trained models")
+     parser.add_argument('-m', '--contraclip-models', action='store_true', help="download pre-trained ContraCLIP models")
+     args = parser.parse_args()
+
+     # Create pre-trained models root directory
+     pretrained_models_root = osp.join('models', 'pretrained')
+     os.makedirs(pretrained_models_root, exist_ok=True)
+
+     # Download the following pre-trained GAN generators (under models/pretrained/)
+     print("#. Download pre-trained GAN generators...")
+     print(" \\__.GenForce")
+     download_genforce_models = False
+     for k, v in GENFORCE_MODELS.items():
+         if not osp.exists(osp.join(pretrained_models_root, 'genforce', v[0])):
+             download_genforce_models = True
+             break
+     if download_genforce_models:
+         download(src=GENFORCE[0], sha256sum=GENFORCE[1], dest=pretrained_models_root)
+     else:
+         print(" \\__Already exists.")
+
+     print("#. Download pre-trained ArcFace model...")
+     print(" \\__.ArcFace")
+     if osp.exists(osp.join(pretrained_models_root, 'arcface', 'model_ir_se50.pth')):
+         print(" \\__Already exists.")
+     else:
+         download(src=ARCFACE[0], sha256sum=ARCFACE[1], dest=pretrained_models_root)
+
+     print("#. Download pre-trained SFD face detector model...")
+     print(" \\__.Face detector (SFD)")
+     if osp.exists(osp.join(pretrained_models_root, 'sfd', 's3fd-619a316812.pth')):
+         print(" \\__Already exists.")
+     else:
+         download(src=SFD[0], sha256sum=SFD[1], dest=pretrained_models_root)
+
+     print("#. Download pre-trained FairFace model...")
+     print(" \\__.FairFace")
+     if osp.exists(osp.join(pretrained_models_root, 'fairface', 'fairface_alldata_4race_20191111.pt')) and \
+             osp.exists(osp.join(pretrained_models_root, 'fairface', 'res34_fair_align_multi_7_20190809.pt')):
+         print(" \\__Already exists.")
+     else:
+         download(src=FAIRFACE[0], sha256sum=FAIRFACE[1], dest=pretrained_models_root)
+
+     print("#. Download pre-trained Hopenet model...")
+     print(" \\__.Hopenet")
+     if osp.exists(osp.join(pretrained_models_root, 'hopenet', 'hopenet_alpha1.pkl')) and \
+             osp.exists(osp.join(pretrained_models_root, 'hopenet', 'hopenet_alpha2.pkl')) and \
+             osp.exists(osp.join(pretrained_models_root, 'hopenet', 'hopenet_robust_alpha1.pkl')):
+         print(" \\__Already exists.")
+     else:
+         download(src=HOPENET[0], sha256sum=HOPENET[1], dest=pretrained_models_root)
+
+     print("#. Download pre-trained AU detector model...")
+     print(" \\__.FANet")
+     if osp.exists(osp.join(pretrained_models_root, 'au_detector', 'disfa_adaptation_f0.pth')):
+         print(" \\__Already exists.")
+     else:
+         download(src=AUDET[0], sha256sum=AUDET[1], dest=pretrained_models_root)
+
+     print("#. Download pre-trained CelebA attributes predictors models...")
+     print(" \\__.CelebA")
+     if osp.exists(osp.join(pretrained_models_root, 'celeba_attributes', 'eval_predictor.pth.tar')):
+         print(" \\__Already exists.")
+     else:
+         download(src=CELEBA_ATTRIBUTES[0], sha256sum=CELEBA_ATTRIBUTES[1], dest=pretrained_models_root)
+
+     # Download pre-trained ContraCLIP models
+     if args.contraclip_models:
+         pretrained_contraclip_root = osp.join('experiments', 'complete')
+         os.makedirs(pretrained_contraclip_root, exist_ok=True)
+
+         print("#. Download pre-trained ContraCLIP models...")
+         print(" \\__.ContraCLIP pre-trained models...")
+         download(src=ContraCLIP_models[0],
+                  sha256sum=ContraCLIP_models[1],
+                  dest=pretrained_contraclip_root)
+
+
+ if __name__ == '__main__':
+     main()
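For reference, the script can be run on its own to fetch only the prerequisite models; passing `-m`/`--contraclip-models` (the flag defined above) additionally downloads the pre-trained ContraCLIP models into `experiments/complete/`:

```bash
python download_models.py --contraclip-models
```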
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/02be4f3503db069a28be3bf222c0f64ae6f85d05/image_z.jpg ADDED
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/02be4f3503db069a28be3bf222c0f64ae6f85d05/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ce54ca0a139e42f1c79fe7f60d576d4a485e36627318c7c246275dee69a15ee
+ size 2795
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/10c29d1257e7c6e513d8ef23599ba6ba89eda181/image_z.jpg ADDED
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/10c29d1257e7c6e513d8ef23599ba6ba89eda181/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a511c6edd052939a88acc05a299f3da41a5b1f05270d2443fd8a8e916bd05f1
+ size 2795
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/323234c425e1b4fd5ec0539bb64765d72afffc75/image_z.jpg ADDED
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/323234c425e1b4fd5ec0539bb64765d72afffc75/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70da19d64562dae4c03e15617a55024c30070f7419ed9d32adcfe5d5240b7adb
+ size 2795
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/555510a5999a3c5eb3097e0b80da4cee97088c8e/image_z.jpg ADDED
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/555510a5999a3c5eb3097e0b80da4cee97088c8e/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8342234862b5c57bcf3c4153837f6654052f1de4192f1e7d0a464a9fc0360550
+ size 2795
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/911ea1a1d3b3e6b57a819ad9310048384608ce08/image_z.jpg ADDED
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/911ea1a1d3b3e6b57a819ad9310048384608ce08/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a5e51d71a466329e2396c8124758dfe3d05f4bab42c2d782b4e4f77af30bccb
+ size 2795
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/9232b69c406fece5016ccfe260a226eaef1d9181/image_z.jpg ADDED
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/9232b69c406fece5016ccfe260a226eaef1d9181/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e6f5a35eb58c4c2e7228f423e5860730cdb0d212855fc374fc471702d6b3339
+ size 2795
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/c1345dab91e4c82070858e3201bcd7eac0bb042e/image_z.jpg ADDED
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/c1345dab91e4c82070858e3201bcd7eac0bb042e/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5afd158af814aaf1aae0455127b3d5eca7d1e599cb9bad2f45f1d1a7eb4fbee7
+ size 2795
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/ce26bd5026197c1df60bc43ab1a99f3db8730b0a/image_z.jpg ADDED
ContraCLIP/experiments/latent_codes/pggan_celebahq1024/pggan_celebahq1024-8/ce26bd5026197c1df60bc43ab1a99f3db8730b0a/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfaa6a1a2189cbdce8d8371428d259aaa9de83fa3975352be57694ca3e3c1144
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/08307a8eacf4509f45ab65e8ee76dc53d089dec9/image_w.jpg ADDED
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/08307a8eacf4509f45ab65e8ee76dc53d089dec9/latent_code_w+.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82712b3f877423756238c59c19cdc49b131aeab9054e977e0eabce7dae881261
+ size 33515
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/08307a8eacf4509f45ab65e8ee76dc53d089dec9/latent_code_w.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d3f23f63d0002b4bd330fcabad40161443181b380c2ee343f381d6f57406410
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/08307a8eacf4509f45ab65e8ee76dc53d089dec9/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cee26d0ce68a05d32fcb28eeb8a13418c1978c62c3f3daab5b97fe48fb6a8cc8
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/28c1c29df1be16a26914078f57b2b95598496048/image_w.jpg ADDED
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/28c1c29df1be16a26914078f57b2b95598496048/latent_code_w+.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff0a3002d8d50bbc7702c1390726fddb57fa1d384e6b3caecdb462db0aa272a2
+ size 33515
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/28c1c29df1be16a26914078f57b2b95598496048/latent_code_w.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c19769cbebb4bf0278d55ebc46442a54d56241487fdf26eaf1cd7e392959809a
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/28c1c29df1be16a26914078f57b2b95598496048/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:562f242b6885c9a829a8f4b0e6af453f3dd53684aa182a4ef6b90d871cada48f
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/3ac589d77dc2845eda68b3e92b92f5aef972bd93/image_w.jpg ADDED
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/3ac589d77dc2845eda68b3e92b92f5aef972bd93/latent_code_w+.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d745141e50c6a3e69ecd5480ad9f22547e78854c2024b3f43ce9b4bcc5627565
+ size 33515
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/3ac589d77dc2845eda68b3e92b92f5aef972bd93/latent_code_w.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4d787702e6e28f7a54afccbaa2c26bbd728af5c1edb33ee15f5c214094590a7
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/3ac589d77dc2845eda68b3e92b92f5aef972bd93/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:874978425c2472ec203918a10d359dd1e9cd9c795939de4263da071a31bc68d2
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/565fd0382c69e4c9462179dbce46cab36b576226/image_w.jpg ADDED
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/565fd0382c69e4c9462179dbce46cab36b576226/latent_code_w+.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ccfe3e29ba646c227f286e2554b758e17f339562ef093e6a66e8a86bb8a70948
+ size 33515
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/565fd0382c69e4c9462179dbce46cab36b576226/latent_code_w.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e3b76d1580404c16254926af0567e1a14478a4343c30f47d2666f5b3ab2cb80
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/565fd0382c69e4c9462179dbce46cab36b576226/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bee2b9d54714f6a7b8ac46a709599cfdffe20b550f0301d1069d8264855d6889
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/584c090fdba130d896e7b67f942df55f44baf022/image_w.jpg ADDED
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/584c090fdba130d896e7b67f942df55f44baf022/latent_code_w+.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66213a5a461870c4b818be52684f06475076904d3458a487aadb3d49bd9427b1
+ size 33515
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/584c090fdba130d896e7b67f942df55f44baf022/latent_code_w.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:001cae0eb200dec5984724bb15f5b55abc69d2f76e6ff9ada0628d2c56fa7b80
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/584c090fdba130d896e7b67f942df55f44baf022/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89eddb1448b8b7cab265f23114248dc6ef3f9b398b188ed4237c1488d1d31b4c
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/60ace58591602b942ef7816000203c07479baf1e/image_w.jpg ADDED
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/60ace58591602b942ef7816000203c07479baf1e/latent_code_w+.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bed42b35a7ed1da7fa51000244723e23b662b1873f68403ec94b3098c79e696
+ size 33515
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/60ace58591602b942ef7816000203c07479baf1e/latent_code_w.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cce019f116bb2d7e1999b6a160e0cfb405712953616433a64ff076ddf387997
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/60ace58591602b942ef7816000203c07479baf1e/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c464e73f08f4b5288848c1ee775b36d79c45ef1533aec4ef99f09c8b4a125c2
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/6e3a4bd20238f6964cb447efc2bf4f9ae889212f/image_w.jpg ADDED
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/6e3a4bd20238f6964cb447efc2bf4f9ae889212f/latent_code_w+.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3aaa266547d4a8ea6e40c399c8e2923c5b7c5b86ec463ba3e6ec3f47103fa28d
+ size 33515
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/6e3a4bd20238f6964cb447efc2bf4f9ae889212f/latent_code_w.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2107d9d5e9e2ae036dc8ec731b7e335d3ab9f0a58918086101804a3197280210
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/6e3a4bd20238f6964cb447efc2bf4f9ae889212f/latent_code_z.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:010c319d0f4638ef021dd33a819c17c3f9fa8c5bf4403d2c2c462c335d790d54
+ size 2795
ContraCLIP/experiments/latent_codes/stylegan2_afhqcat512/stylegan2_afhqcat512-16/89577abc4b195d823ba8cf80e9405fc7bc822ebe/image_w.jpg ADDED