Spaces:
Muhammad Rama Nurimani committed
Commit 1515d09 · Parent(s): 7b4a65c

test deploy
Browse files:
- app.py +45 -0
- latest_net_G.pth +3 -0
- models/__init__.py +67 -0
- models/__pycache__/__init__.cpython-310.pyc +0 -0
- models/__pycache__/base_model.cpython-310.pyc +0 -0
- models/__pycache__/cut_model.cpython-310.pyc +0 -0
- models/__pycache__/networks.cpython-310.pyc +0 -0
- models/__pycache__/patchnce.cpython-310.pyc +0 -0
- models/__pycache__/stylegan_networks.cpython-310.pyc +0 -0
- models/base_model.py +258 -0
- models/cut_model.py +214 -0
- models/cycle_gan_model.py +222 -0
- models/networks.py +1403 -0
- models/patchnce.py +55 -0
- models/sincut_model.py +79 -0
- models/stylegan_networks.py +914 -0
- requirements.txt +7 -0
app.py
ADDED
@@ -0,0 +1,45 @@
import gradio as gr
import torch
from torchvision import transforms
from PIL import Image
from models.networks import define_G

def load_model(model_path):
    model = define_G(input_nc=1, output_nc=2, ngf=64, netG='unet_256', norm='batch')
    state_dict = torch.load(model_path, map_location="cpu")
    model.load_state_dict(state_dict)
    model.eval()
    return model

model_path = "./latest_net_G.pth"
model = load_model(model_path)


def style_transfer(input_image):
    transform = transforms.Compose([
        transforms.Grayscale(num_output_channels=1),
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5], std=[0.5])  # fixed: the second keyword was a duplicate 'mean'
    ])
    input_tensor = transform(input_image).unsqueeze(0)
    with torch.no_grad():
        output_tensor = model(input_tensor)

    output_tensor = output_tensor.squeeze(0).cpu().clamp(-1, 1)
    output_tensor = (output_tensor + 1) / 2  # map back to [0, 1]
    # Note: the generator emits 2 channels (output_nc=2), so ToPILImage yields an 'LA' image here.
    return transforms.ToPILImage()(output_tensor)


iface = gr.Interface(
    fn=style_transfer,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    live=True,
    title="Colorization",
    description="Colorize your black and white photos with pix2pix."
)

iface.launch()
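The 1-in/2-out channel split above is the usual Lab colorization setup: the network sees the lightness (L) channel and predicts the two chroma (ab) channels, so returning the raw 2-channel tensor displays a grayscale-plus-alpha image rather than a colorized one. A minimal sketch of the missing recombination step, assuming scikit-image is available and the common scaling of L from [0, 100] and ab from roughly [-110, 110] down to [-1, 1] (the exact constants depend on how this checkpoint was trained):

import numpy as np
import torch
from PIL import Image
from skimage.color import lab2rgb  # assumption: scikit-image is installed

def lab_to_rgb(l_tensor, ab_tensor):
    """Recombine the normalized L input (1, H, W) with the raw [-1, 1]
    ab output (2, H, W) into an RGB PIL image. Scaling constants are an
    assumption, not taken from this repository."""
    l = (l_tensor.squeeze(0).cpu().numpy() + 1.0) * 50.0   # back to [0, 100]
    ab = ab_tensor.cpu().numpy() * 110.0                   # back to about [-110, 110]
    lab = np.concatenate([l[None], ab], axis=0).transpose(1, 2, 0)  # (H, W, 3)
    rgb = lab2rgb(lab.astype(np.float64))                  # floats in [0, 1]
    return Image.fromarray((rgb * 255).astype(np.uint8))

Here l_tensor would be the normalized network input and ab_tensor the generator output taken before the [0, 1] rescaling in style_transfer.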
latest_net_G.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:60066e5f50b75604b160eff4eb2cb561f5700ec6755658cac53a9244c8b10790
size 217710350
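The checkpoint itself lives in Git LFS; the three lines above are only the pointer. A quick way to confirm that a downloaded file matches the pointer is to compare its SHA-256 digest and byte count against the oid and size fields:

import hashlib

EXPECTED_OID = "60066e5f50b75604b160eff4eb2cb561f5700ec6755658cac53a9244c8b10790"
EXPECTED_SIZE = 217710350

h = hashlib.sha256()
size = 0
with open("latest_net_G.pth", "rb") as f:
    # hash in 1 MiB chunks so the 217 MB file is not read into memory at once
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
        size += len(chunk)
print(h.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE)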
models/__init__.py
ADDED
@@ -0,0 +1,67 @@
"""This package contains modules related to objective functions, optimizations, and network architectures.

To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
You need to implement the following five functions:
    -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
    -- <set_input>: unpack data from dataset and apply preprocessing.
    -- <forward>: produce intermediate results.
    -- <optimize_parameters>: calculate loss, gradients, and update network weights.
    -- <modify_commandline_options>: (optionally) add model-specific options and set default options.

In the function <__init__>, you need to define four lists:
    -- self.loss_names (str list): specify the training losses that you want to plot and save.
    -- self.model_names (str list): define networks used in our training.
    -- self.visual_names (str list): specify the images that you want to display and save.
    -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for a usage example.

Now you can use the model class by specifying flag '--model dummy'.
See our template model class 'template_model.py' for more details.
"""

import importlib
from models.base_model import BaseModel


def find_model_using_name(model_name):
    """Import the module "models/[model_name]_model.py".

    In the file, the class called [ModelName]Model() will
    be instantiated. It has to be a subclass of BaseModel,
    and it is case-insensitive.
    """
    model_filename = "models." + model_name + "_model"
    modellib = importlib.import_module(model_filename)
    model = None
    target_model_name = model_name.replace('_', '') + 'model'
    for name, cls in modellib.__dict__.items():
        if name.lower() == target_model_name.lower() \
           and issubclass(cls, BaseModel):
            model = cls

    if model is None:
        print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
        exit(0)

    return model


def get_option_setter(model_name):
    """Return the static method <modify_commandline_options> of the model class."""
    model_class = find_model_using_name(model_name)
    return model_class.modify_commandline_options


def create_model(opt):
    """Create a model given the option.

    This is the main interface between this package and 'train.py'/'test.py'.

    Example:
        >>> from models import create_model
        >>> model = create_model(opt)
    """
    model = find_model_using_name(opt.model)
    instance = model(opt)
    print("model [%s] was created" % type(instance).__name__)
    return instance
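As a minimal usage sketch (run from the repository root), the resolver above turns a '--model' flag into the matching class by importing the module and comparing names case-insensitively:

from models import find_model_using_name

# '--model cut' imports models/cut_model.py and matches 'cutmodel'
# against the names of the BaseModel subclasses it defines.
model_cls = find_model_using_name('cut')
print(model_cls.__name__)  # CUTModel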
models/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.21 kB)

models/__pycache__/base_model.cpython-310.pyc
ADDED
Binary file (11.2 kB)

models/__pycache__/cut_model.cpython-310.pyc
ADDED
Binary file (8.11 kB)

models/__pycache__/networks.cpython-310.pyc
ADDED
Binary file (47.8 kB)

models/__pycache__/patchnce.cpython-310.pyc
ADDED
Binary file (1.54 kB)

models/__pycache__/stylegan_networks.cpython-310.pyc
ADDED
Binary file (22.2 kB)
models/base_model.py
ADDED
@@ -0,0 +1,258 @@
import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks


class BaseModel(ABC):
    """This class is an abstract base class (ABC) for models.
    To create a subclass, you need to implement the following five functions:
        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
        -- <set_input>: unpack data from dataset and apply preprocessing.
        -- <forward>: produce intermediate results.
        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
    """

    def __init__(self, opt):
        """Initialize the BaseModel class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

        When creating your custom class, you need to implement your own initialization.
        In this function, you should first call <BaseModel.__init__(self, opt)>
        Then, you need to define four lists:
            -- self.loss_names (str list): specify the training losses that you want to plot and save.
            -- self.model_names (str list): define networks used in our training.
            -- self.visual_names (str list): specify the images that you want to display and save.
            -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
        """
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')  # get device name: CPU or GPU
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)  # save all the checkpoints to save_dir
        if opt.preprocess != 'scale_width':  # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
            torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.optimizers = []
        self.image_paths = []
        self.metric = 0  # used for learning rate policy 'plateau'

    @staticmethod
    def dict_grad_hook_factory(add_func=lambda x: x):
        saved_dict = dict()

        def hook_gen(name):
            def grad_hook(grad):
                saved_vals = add_func(grad)
                saved_dict[name] = saved_vals
            return grad_hook
        return hook_gen, saved_dict

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new model-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        return parser

    @abstractmethod
    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): includes the data itself and its metadata information.
        """
        pass

    @abstractmethod
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        pass

    @abstractmethod
    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        pass

    def setup(self, opt):
        """Load and print networks; create schedulers

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        if self.isTrain:
            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
        if not self.isTrain or opt.continue_train:
            load_suffix = opt.epoch
            self.load_networks(load_suffix)

        self.print_networks(opt.verbose)

    def parallelize(self):
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                setattr(self, 'net' + name, torch.nn.DataParallel(net, self.opt.gpu_ids))

    def data_dependent_initialize(self, data):
        pass

    def eval(self):
        """Make models eval mode during test time"""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                net.eval()

    def test(self):
        """Forward function used in test time.

        This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
        It also calls <compute_visuals> to produce additional visualization results
        """
        with torch.no_grad():
            self.forward()
            self.compute_visuals()

    def compute_visuals(self):
        """Calculate additional output images for visdom and HTML visualization"""
        pass

    def get_image_paths(self):
        """Return image paths that are used to load current data"""
        return self.image_paths

    def update_learning_rate(self):
        """Update learning rates for all the networks; called at the end of every epoch"""
        for scheduler in self.schedulers:
            if self.opt.lr_policy == 'plateau':
                scheduler.step(self.metric)
            else:
                scheduler.step()

        lr = self.optimizers[0].param_groups[0]['lr']
        print('learning rate = %.7f' % lr)

    def get_current_visuals(self):
        """Return visualization images. train.py will display these images with visdom, and save the images to an HTML file"""
        visual_ret = OrderedDict()
        for name in self.visual_names:
            if isinstance(name, str):
                visual_ret[name] = getattr(self, name)
        return visual_ret

    def get_current_losses(self):
        """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
        errors_ret = OrderedDict()
        for name in self.loss_names:
            if isinstance(name, str):
                errors_ret[name] = float(getattr(self, 'loss_' + name))  # float(...) works for both scalar tensor and float number
        return errors_ret

    def save_networks(self, epoch):
        """Save all the networks to the disk.

        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = '%s_net_%s.pth' % (epoch, name)
                save_path = os.path.join(self.save_dir, save_filename)
                net = getattr(self, 'net' + name)

                if len(self.gpu_ids) > 0 and torch.cuda.is_available():
                    torch.save(net.module.cpu().state_dict(), save_path)
                    net.cuda(self.gpu_ids[0])
                else:
                    torch.save(net.cpu().state_dict(), save_path)

    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
        """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
        key = keys[i]
        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
            if module.__class__.__name__.startswith('InstanceNorm') and \
                    (key == 'running_mean' or key == 'running_var'):
                if getattr(module, key) is None:
                    state_dict.pop('.'.join(keys))
            if module.__class__.__name__.startswith('InstanceNorm') and \
                    (key == 'num_batches_tracked'):
                state_dict.pop('.'.join(keys))
        else:
            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)

    def load_networks(self, epoch):
        """Load all the networks from the disk.

        Parameters:
            epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
        """
        for name in self.model_names:
            if isinstance(name, str):
                load_filename = '%s_net_%s.pth' % (epoch, name)
                if self.opt.isTrain and self.opt.pretrained_name is not None:
                    load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
                else:
                    load_dir = self.save_dir

                load_path = os.path.join(load_dir, load_filename)
                net = getattr(self, 'net' + name)
                if isinstance(net, torch.nn.DataParallel):
                    net = net.module
                print('loading the model from %s' % load_path)
                # if you are using PyTorch newer than 0.4 (e.g., built from
                # GitHub source), you can remove str() on self.device
                state_dict = torch.load(load_path, map_location=str(self.device))
                if hasattr(state_dict, '_metadata'):
                    del state_dict._metadata

                # patch InstanceNorm checkpoints prior to 0.4
                # for key in list(state_dict.keys()):  # need to copy keys here because we mutate in loop
                #     self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
                net.load_state_dict(state_dict)

    def print_networks(self, verbose):
        """Print the total number of parameters in the network and (if verbose) network architecture

        Parameters:
            verbose (bool) -- if verbose: print the network architecture
        """
        print('---------- Networks initialized -------------')
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, 'net' + name)
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                if verbose:
                    print(net)
                print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
        print('-----------------------------------------------')

    def set_requires_grad(self, nets, requires_grad=False):
        """Set requires_grad=False for all the networks to avoid unnecessary computations
        Parameters:
            nets (network list)  -- a list of networks
            requires_grad (bool) -- whether the networks require gradients or not
        """
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad

    def generate_visuals_for_evaluation(self, data, mode):
        return {}
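To make the subclass contract concrete, here is a minimal hypothetical model following the five-function recipe above (the repository's own template_model.py, listed in this commit but not shown here, plays the same role; names like DummyModel and self.criterion are illustrative):

import torch
from models.base_model import BaseModel
from models import networks

class DummyModel(BaseModel):
    """Sketch of the smallest possible BaseModel subclass: one generator,
    one L1 loss, one optimizer."""
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = ['G']                 # printed via get_current_losses
        self.model_names = ['G']                # saved/loaded as netG
        self.visual_names = ['real', 'fake']    # shown via get_current_visuals
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                      opt.netG, opt.normG, not opt.no_dropout,
                                      opt.init_type, opt.init_gain,
                                      opt.no_antialias, opt.no_antialias_up,
                                      self.gpu_ids, opt)
        if self.isTrain:
            self.criterion = torch.nn.L1Loss()
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr)
            self.optimizers.append(self.optimizer_G)

    def set_input(self, input):
        self.real = input['A'].to(self.device)
        self.target = input['B'].to(self.device)
        self.image_paths = input['A_paths']

    def forward(self):
        self.fake = self.netG(self.real)

    def optimize_parameters(self):
        self.forward()
        self.optimizer_G.zero_grad()
        self.loss_G = self.criterion(self.fake, self.target)
        self.loss_G.backward()
        self.optimizer_G.step()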
models/cut_model.py
ADDED
@@ -0,0 +1,214 @@
import numpy as np
import torch
from .base_model import BaseModel
from . import networks
from .patchnce import PatchNCELoss
import util.util as util


class CUTModel(BaseModel):
    """ This class implements the CUT and FastCUT models, described in the paper
    Contrastive Learning for Unpaired Image-to-Image Translation
    Taesung Park, Alexei A. Efros, Richard Zhang, Jun-Yan Zhu
    ECCV, 2020

    The code borrows heavily from the PyTorch implementation of CycleGAN
    https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
    """
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """ Configures options specific to the CUT model
        """
        parser.add_argument('--CUT_mode', type=str, default="CUT", choices='(CUT, cut, FastCUT, fastcut)')

        parser.add_argument('--lambda_GAN', type=float, default=1.0, help='weight for GAN loss: GAN(G(X))')
        parser.add_argument('--lambda_NCE', type=float, default=1.0, help='weight for NCE loss: NCE(G(X), X)')
        parser.add_argument('--nce_idt', type=util.str2bool, nargs='?', const=True, default=False, help='use NCE loss for identity mapping: NCE(G(Y), Y))')
        parser.add_argument('--nce_layers', type=str, default='0,4,8,12,16', help='compute NCE loss on which layers')
        parser.add_argument('--nce_includes_all_negatives_from_minibatch',
                            type=util.str2bool, nargs='?', const=True, default=False,
                            help='(used for single image translation) If True, include the negatives from the other samples of the minibatch when computing the contrastive loss. Please see models/patchnce.py for more details.')
        parser.add_argument('--netF', type=str, default='mlp_sample', choices=['sample', 'reshape', 'mlp_sample'], help='how to downsample the feature map')
        parser.add_argument('--netF_nc', type=int, default=256)
        parser.add_argument('--nce_T', type=float, default=0.07, help='temperature for NCE loss')
        parser.add_argument('--num_patches', type=int, default=256, help='number of patches per layer')
        parser.add_argument('--flip_equivariance',
                            type=util.str2bool, nargs='?', const=True, default=False,
                            help="Enforce flip-equivariance as additional regularization. It's used by FastCUT, but not CUT")

        parser.set_defaults(pool_size=0)  # no image pooling

        opt, _ = parser.parse_known_args()

        # Set default parameters for CUT and FastCUT
        if opt.CUT_mode.lower() == "cut":
            parser.set_defaults(nce_idt=True, lambda_NCE=1.0)
        elif opt.CUT_mode.lower() == "fastcut":
            parser.set_defaults(
                nce_idt=False, lambda_NCE=10.0, flip_equivariance=True,
                n_epochs=150, n_epochs_decay=50
            )
        else:
            raise ValueError(opt.CUT_mode)

        return parser

    def __init__(self, opt):
        BaseModel.__init__(self, opt)

        # specify the training losses you want to print out.
        # The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'G', 'NCE']
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        self.nce_layers = [int(i) for i in self.opt.nce_layers.split(',')]

        if opt.nce_idt and self.isTrain:
            self.loss_names += ['NCE_Y']
            self.visual_names += ['idt_B']

        if self.isTrain:
            self.model_names = ['G', 'F', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']

        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt)
        self.netF = networks.define_F(opt.input_nc, opt.netF, opt.normG, not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)

        if self.isTrain:
            self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt)

            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionNCE = []

            for nce_layer in self.nce_layers:
                self.criterionNCE.append(PatchNCELoss(opt).to(self.device))

            self.criterionIdt = torch.nn.L1Loss().to(self.device)
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

    def data_dependent_initialize(self, data):
        """
        The feature network netF is defined in terms of the shape of the intermediate, extracted
        features of the encoder portion of netG. Because of this, the weights of netF are
        initialized at the first feedforward pass with some input images.
        Please also see PatchSampleF.create_mlp(), which is called at the first forward() call.
        """
        bs_per_gpu = data["A"].size(0) // max(len(self.opt.gpu_ids), 1)
        self.set_input(data)
        self.real_A = self.real_A[:bs_per_gpu]
        self.real_B = self.real_B[:bs_per_gpu]
        self.forward()  # compute fake images: G(A)
        if self.opt.isTrain:
            self.compute_D_loss().backward()  # calculate gradients for D
            self.compute_G_loss().backward()  # calculate gradients for G
            if self.opt.lambda_NCE > 0.0:
                self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
                self.optimizers.append(self.optimizer_F)

    def optimize_parameters(self):
        # forward
        self.forward()

        # update D
        self.set_requires_grad(self.netD, True)
        self.optimizer_D.zero_grad()
        self.loss_D = self.compute_D_loss()
        self.loss_D.backward()
        self.optimizer_D.step()

        # update G
        self.set_requires_grad(self.netD, False)
        self.optimizer_G.zero_grad()
        if self.opt.netF == 'mlp_sample':
            self.optimizer_F.zero_grad()
        self.loss_G = self.compute_G_loss()
        self.loss_G.backward()
        self.optimizer_G.step()
        if self.opt.netF == 'mlp_sample':
            self.optimizer_F.step()

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.
        Parameters:
            input (dict): include the data itself and its metadata information.
        The option 'direction' can be used to swap domain A and domain B.
        """
        AtoB = self.opt.direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.image_paths = input['A_paths' if AtoB else 'B_paths']

    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.real = torch.cat((self.real_A, self.real_B), dim=0) if self.opt.nce_idt and self.opt.isTrain else self.real_A
        if self.opt.flip_equivariance:
            self.flipped_for_equivariance = self.opt.isTrain and (np.random.random() < 0.5)
            if self.flipped_for_equivariance:
                self.real = torch.flip(self.real, [3])

        self.fake = self.netG(self.real)
        self.fake_B = self.fake[:self.real_A.size(0)]
        if self.opt.nce_idt:
            self.idt_B = self.fake[self.real_A.size(0):]

    def compute_D_loss(self):
        """Calculate GAN loss for the discriminator"""
        fake = self.fake_B.detach()
        # Fake; stop backprop to the generator by detaching fake_B
        pred_fake = self.netD(fake)
        self.loss_D_fake = self.criterionGAN(pred_fake, False).mean()
        # Real
        self.pred_real = self.netD(self.real_B)
        loss_D_real = self.criterionGAN(self.pred_real, True)
        self.loss_D_real = loss_D_real.mean()

        # combine loss and calculate gradients
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
        return self.loss_D

    def compute_G_loss(self):
        """Calculate GAN and NCE loss for the generator"""
        fake = self.fake_B
        # First, G(A) should fake the discriminator
        if self.opt.lambda_GAN > 0.0:
            pred_fake = self.netD(fake)
            self.loss_G_GAN = self.criterionGAN(pred_fake, True).mean() * self.opt.lambda_GAN
        else:
            self.loss_G_GAN = 0.0

        if self.opt.lambda_NCE > 0.0:
            self.loss_NCE = self.calculate_NCE_loss(self.real_A, self.fake_B)
        else:
            self.loss_NCE, self.loss_NCE_bd = 0.0, 0.0

        if self.opt.nce_idt and self.opt.lambda_NCE > 0.0:
            self.loss_NCE_Y = self.calculate_NCE_loss(self.real_B, self.idt_B)
            loss_NCE_both = (self.loss_NCE + self.loss_NCE_Y) * 0.5
        else:
            loss_NCE_both = self.loss_NCE

        self.loss_G = self.loss_G_GAN + loss_NCE_both
        return self.loss_G

    def calculate_NCE_loss(self, src, tgt):
        n_layers = len(self.nce_layers)
        feat_q = self.netG(tgt, self.nce_layers, encode_only=True)

        if self.opt.flip_equivariance and self.flipped_for_equivariance:
            feat_q = [torch.flip(fq, [3]) for fq in feat_q]

        feat_k = self.netG(src, self.nce_layers, encode_only=True)
        feat_k_pool, sample_ids = self.netF(feat_k, self.opt.num_patches, None)
        feat_q_pool, _ = self.netF(feat_q, self.opt.num_patches, sample_ids)

        total_nce_loss = 0.0
        for f_q, f_k, crit, nce_layer in zip(feat_q_pool, feat_k_pool, self.criterionNCE, self.nce_layers):
            loss = crit(f_q, f_k) * self.opt.lambda_NCE
            total_nce_loss += loss.mean()

        return total_nce_loss / n_layers
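The PatchNCELoss used in calculate_NCE_loss lives in models/patchnce.py, which this commit adds but the page does not show. The core idea is InfoNCE over sampled patches: each query patch from the translated image should match the key patch from the same spatial location of the source, against the other patches as negatives. A condensed, hypothetical single-image sketch of that objective (the real implementation also handles batching and the minibatch-negatives option):

import torch
import torch.nn as nn

class SimplePatchNCELoss(nn.Module):
    """Simplified PatchNCE: cross-entropy over a patch-similarity matrix,
    where the diagonal entries are the positives."""
    def __init__(self, nce_T=0.07):
        super().__init__()
        self.nce_T = nce_T
        self.ce = nn.CrossEntropyLoss()

    def forward(self, feat_q, feat_k):
        # feat_q, feat_k: (num_patches, C) L2-normalized features from one image;
        # row i of feat_k is the positive for row i of feat_q.
        logits = feat_q @ feat_k.detach().t() / self.nce_T   # (P, P) similarities
        targets = torch.arange(feat_q.size(0), device=feat_q.device)
        return self.ce(logits, targets)                      # diagonal = positives

Detaching feat_k mirrors the one-sided gradient flow of the original loss: only the query branch is pushed toward its positive key.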
models/cycle_gan_model.py
ADDED
@@ -0,0 +1,222 @@
import torch
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
try:
    from apex import amp
except ImportError as error:
    print(error)


class CycleGANModel(BaseModel):
    """
    This class implements the CycleGAN model, for learning image-to-image translation without paired data.

    The model training requires '--dataset_mode unaligned' dataset.
    By default, it uses a '--netG resnet_9blocks' ResNet generator,
    a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
    and a least-square GANs objective ('--gan_mode lsgan').

    CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
    """
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.

        For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
        A (source domain), B (target domain).
        Generators: G_A: A -> B; G_B: B -> A.
        Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
        Forward cycle loss:  lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
        Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
        Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
        Dropout is not used in the original CycleGAN paper.
        """
        # parser.set_defaults(no_dropout=True, no_antialias=True, no_antialias_up=True)  # default CycleGAN did not use dropout
        # parser.set_defaults(no_dropout=True)
        if is_train:
            parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
            parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
            parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')

        return parser

    def __init__(self, opt):
        """Initialize the CycleGAN class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        visual_names_A = ['real_A', 'fake_B', 'rec_A']
        visual_names_B = ['real_B', 'fake_A', 'rec_B']
        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_B=G_B(A) and idt_A=G_A(B)
            visual_names_A.append('idt_B')
            visual_names_B.append('idt_A')

        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # define networks (both Generators and discriminators)
        # The naming is different from those used in the paper.
        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt=opt)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.normG,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, opt.no_antialias, opt.no_antialias_up, self.gpu_ids, opt=opt)

        if self.isTrain:  # define discriminators
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt=opt)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.normD, opt.init_type, opt.init_gain, opt.no_antialias, self.gpu_ids, opt=opt)

        if self.isTrain:
            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
                assert(opt.input_nc == opt.output_nc)
            self.fake_A_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
            self.fake_B_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # define GAN loss.
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): include the data itself and its metadata information.

        The option 'direction' can be used to swap domain A and domain B.
        """
        AtoB = self.opt.direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.image_paths = input['A_paths' if AtoB else 'B_paths']

    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.fake_B = self.netG_A(self.real_A)  # G_A(A)
        self.rec_A = self.netG_B(self.fake_B)   # G_B(G_A(A))
        self.fake_A = self.netG_B(self.real_B)  # G_B(B)
        self.rec_B = self.netG_A(self.fake_A)   # G_A(G_B(B))

    def backward_D_basic(self, netD, real, fake):
        """Calculate GAN loss for the discriminator

        Parameters:
            netD (network)      -- the discriminator D
            real (tensor array) -- real images
            fake (tensor array) -- images generated by a generator

        Return the discriminator loss.
        We also call loss_D.backward() to calculate the gradients.
        """
        # Real
        pred_real = netD(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        # Fake
        pred_fake = netD(fake.detach())
        loss_D_fake = self.criterionGAN(pred_fake, False)
        # Combined loss and calculate gradients
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        if self.opt.amp:
            with amp.scale_loss(loss_D, self.optimizer_D) as scaled_loss:
                scaled_loss.backward()
        else:
            loss_D.backward()
        return loss_D

    def backward_D_A(self):
        """Calculate GAN loss for discriminator D_A"""
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)

    def backward_D_B(self):
        """Calculate GAN loss for discriminator D_B"""
        fake_A = self.fake_A_pool.query(self.fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)

    def backward_G(self):
        """Calculate the loss for generators G_A and G_B"""
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed: ||G_A(B) - B||
            self.idt_A = self.netG_A(self.real_B)
            self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
            # G_B should be identity if real_A is fed: ||G_B(A) - A||
            self.idt_B = self.netG_B(self.real_A)
            self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0

        # GAN loss D_A(G_A(A))
        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
        # GAN loss D_B(G_B(B))
        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
        # Forward cycle loss || G_B(G_A(A)) - A||
        self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
        # Backward cycle loss || G_A(G_B(B)) - B||
        self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
        # combined loss and calculate gradients
        self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
        if self.opt.amp:
            with amp.scale_loss(self.loss_G, self.optimizer_G) as scaled_loss:
                scaled_loss.backward()
        else:
            self.loss_G.backward()

    def data_dependent_initialize(self, data):  # fixed: signature now matches BaseModel's callers
        return

    def generate_visuals_for_evaluation(self, data, mode):
        with torch.no_grad():
            visuals = {}
            AtoB = self.opt.direction == "AtoB"
            G = self.netG_A
            source = data["A" if AtoB else "B"].to(self.device)
            if mode == "forward":
                visuals["fake_B"] = G(source)
            else:
                raise ValueError("mode %s is not recognized" % mode)
            return visuals

    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        # forward
        self.forward()  # compute fake images and reconstruction images.
        # G_A and G_B
        self.set_requires_grad([self.netD_A, self.netD_B], False)  # Ds require no gradients when optimizing Gs
        self.optimizer_G.zero_grad()  # set G_A and G_B's gradients to zero
        self.backward_G()             # calculate gradients for G_A and G_B
        self.optimizer_G.step()       # update G_A and G_B's weights
        # D_A and D_B
        self.set_requires_grad([self.netD_A, self.netD_B], True)
        self.optimizer_D.zero_grad()  # set D_A and D_B's gradients to zero
        self.backward_D_A()           # calculate gradients for D_A
        self.backward_D_B()           # calculate gradients for D_B
        self.optimizer_D.step()       # update D_A and D_B's weights
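backward_D_A and backward_D_B query an ImagePool before scoring fakes. That class comes from util/image_pool.py, which is imported but not part of this commit; as a rough sketch of its standard behavior (a history buffer of past fakes, following the CycleGAN codebase; details here are assumptions):

import random
import torch

class ImagePool:
    """Sketch of the discriminator history buffer: with probability 0.5 a
    query returns a stored past fake instead of the current one, which
    reduces oscillation in discriminator training."""
    def __init__(self, pool_size):
        self.pool_size = pool_size
        self.images = []

    def query(self, images):
        if self.pool_size == 0:              # pool disabled: pass through
            return images
        out = []
        for image in images:
            image = image.unsqueeze(0)       # (C, H, W) -> (1, C, H, W)
            if len(self.images) < self.pool_size:
                self.images.append(image)    # pool not full: store and return
                out.append(image)
            elif random.random() > 0.5:      # swap with a random stored fake
                idx = random.randrange(len(self.images))
                tmp = self.images[idx].clone()
                self.images[idx] = image
                out.append(tmp)
            else:                            # keep the current fake
                out.append(image)
        return torch.cat(out, 0)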
models/networks.py
ADDED
@@ -0,0 +1,1403 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
import numpy as np
from .stylegan_networks import StyleGAN2Discriminator, StyleGAN2Generator, TileStyleGAN2Discriminator

###############################################################################
# Helper Functions
###############################################################################


def get_filter(filt_size=3):
    if(filt_size == 1):
        a = np.array([1., ])
    elif(filt_size == 2):
        a = np.array([1., 1.])
    elif(filt_size == 3):
        a = np.array([1., 2., 1.])
    elif(filt_size == 4):
        a = np.array([1., 3., 3., 1.])
    elif(filt_size == 5):
        a = np.array([1., 4., 6., 4., 1.])
    elif(filt_size == 6):
        a = np.array([1., 5., 10., 10., 5., 1.])
    elif(filt_size == 7):
        a = np.array([1., 6., 15., 20., 15., 6., 1.])

    filt = torch.Tensor(a[:, None] * a[None, :])
    filt = filt / torch.sum(filt)

    return filt


class Downsample(nn.Module):
    def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):
        super(Downsample, self).__init__()
        self.filt_size = filt_size
        self.pad_off = pad_off
        self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
        self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
        self.stride = stride
        self.off = int((self.stride - 1) / 2.)
        self.channels = channels

        filt = get_filter(filt_size=self.filt_size)
        self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))

        self.pad = get_pad_layer(pad_type)(self.pad_sizes)

    def forward(self, inp):
        if(self.filt_size == 1):
            if(self.pad_off == 0):
                return inp[:, :, ::self.stride, ::self.stride]
            else:
                return self.pad(inp)[:, :, ::self.stride, ::self.stride]
        else:
            return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])


class Upsample2(nn.Module):
    def __init__(self, scale_factor, mode='nearest'):
        super().__init__()
        self.factor = scale_factor
        self.mode = mode

    def forward(self, x):
        return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)


class Upsample(nn.Module):
    def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
        super(Upsample, self).__init__()
        self.filt_size = filt_size
        self.filt_odd = np.mod(filt_size, 2) == 1
        self.pad_size = int((filt_size - 1) / 2)
        self.stride = stride
        self.off = int((self.stride - 1) / 2.)
        self.channels = channels

        filt = get_filter(filt_size=self.filt_size) * (stride**2)
        self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))

        self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])

    def forward(self, inp):
        ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
        if(self.filt_odd):
            return ret_val
        else:
            return ret_val[:, :, :-1, :-1]


def get_pad_layer(pad_type):
    if(pad_type in ['refl', 'reflect']):
        PadLayer = nn.ReflectionPad2d
    elif(pad_type in ['repl', 'replicate']):
        PadLayer = nn.ReplicationPad2d
    elif(pad_type == 'zero'):
        PadLayer = nn.ZeroPad2d
    else:
        print('Pad type [%s] not recognized' % pad_type)
    return PadLayer
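As a quick sanity check of the blur-then-subsample behavior implemented above (a small sketch using the Downsample class just defined):

import torch

# filt_size=3 applies a 3x3 binomial blur per channel, then stride-2
# sampling, so spatial dimensions halve: 256 -> 128.
down = Downsample(channels=64, filt_size=3, stride=2)
x = torch.randn(1, 64, 256, 256)
print(down(x).shape)  # torch.Size([1, 64, 128, 128])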
class Identity(nn.Module):
    def forward(self, x):
        return x


def get_norm_layer(norm_type='instance'):
    """Return a normalization layer

    Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | none

    For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
    For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
    """
    if norm_type == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    elif norm_type == 'instance':
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    elif norm_type == 'none':
        def norm_layer(x):
            return Identity()
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer


def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                              opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
    and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
    For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)  # fixed: was 'return NotImplementedError(...)'
    return scheduler
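To make the 'linear' policy concrete, here is the multiplier lambda_rule produces, assuming typical flag values (epoch_count=1, n_epochs=100, n_epochs_decay=100; the real values come from the options parser):

# Constant for the first n_epochs, then a linear ramp toward zero over
# the next n_epochs_decay epochs.
def lambda_rule(epoch, epoch_count=1, n_epochs=100, n_epochs_decay=100):
    return 1.0 - max(0, epoch + epoch_count - n_epochs) / float(n_epochs_decay + 1)

print(lambda_rule(0))    # 1.0
print(lambda_rule(99))   # 1.0   (last constant epoch)
print(lambda_rule(149))  # ~0.505 (halfway through the decay phase)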
161 |
+
|
162 |
+
|
163 |
+
def init_weights(net, init_type='normal', init_gain=0.02, debug=False):
    """Initialize network weights.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
    work better for some applications. Feel free to try yourself.
    """
    def init_func(m):  # define the initialization function
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if debug:
                print(classname)
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:  # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    net.apply(init_func)  # apply the initialization function <init_func>

def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[], debug=False, initialize_weights=True):
    """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Return an initialized network.
    """
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        net.to(gpu_ids[0])
        # if not amp:
        #     net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs for non-AMP training
    if initialize_weights:
        init_weights(net, init_type, init_gain=init_gain, debug=debug)
    return net

def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal',
             init_gain=0.02, no_antialias=False, no_antialias_up=False, gpu_ids=[], opt=None):
    """Create a generator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        output_nc (int)    -- the number of channels in output images
        ngf (int)          -- the number of filters in the last conv layer
        netG (str)         -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
        norm (str)         -- the name of normalization layers used in the network: batch | instance | none
        use_dropout (bool) -- if use dropout layers.
        init_type (str)    -- the name of our initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns a generator

    Our current implementation provides two types of generators:
        U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
        The original U-Net paper: https://arxiv.org/abs/1505.04597

        Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
        Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
        We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).

    The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)

    if netG == 'resnet_9blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=9, opt=opt)
    elif netG == 'resnet_6blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=6, opt=opt)
    elif netG == 'resnet_4blocks':
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, no_antialias=no_antialias, no_antialias_up=no_antialias_up, n_blocks=4, opt=opt)
    elif netG == 'unet_128':
        net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == 'unet_256':
        net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == 'stylegan2':
        net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, opt=opt)
    elif netG == 'smallstylegan2':
        net = StyleGAN2Generator(input_nc, output_nc, ngf, use_dropout=use_dropout, n_blocks=2, opt=opt)
    elif netG == 'resnet_cat':
        n_blocks = 8
        net = G_Resnet(input_nc, output_nc, opt.nz, num_downs=2, n_res=n_blocks - 4, ngf=ngf, norm='inst', nl_layer='relu')
    else:
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    return init_net(net, init_type, init_gain, gpu_ids, initialize_weights=('stylegan2' not in netG))

def define_F(input_nc, netF, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
    if netF == 'global_pool':
        net = PoolingF()
    elif netF == 'reshape':
        net = ReshapeF()
    elif netF == 'sample':
        net = PatchSampleF(use_mlp=False, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
    elif netF == 'mlp_sample':
        net = PatchSampleF(use_mlp=True, init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids, nc=opt.netF_nc)
    elif netF == 'strided_conv':
        net = StridedConvF(init_type=init_type, init_gain=init_gain, gpu_ids=gpu_ids)
    else:
        raise NotImplementedError('projection model name [%s] is not recognized' % netF)
    return init_net(net, init_type, init_gain, gpu_ids)

def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, no_antialias=False, gpu_ids=[], opt=None):
    """Create a discriminator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        ndf (int)          -- the number of filters in the first conv layer
        netD (str)         -- the architecture's name: basic | n_layers | pixel
        n_layers_D (int)   -- the number of conv layers in the discriminator; effective when netD=='n_layers'
        norm (str)         -- the type of normalization layers used in the network.
        init_type (str)    -- the name of the initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns a discriminator

    Our current implementation provides three types of discriminators:
        [basic]: 'PatchGAN' classifier described in the original pix2pix paper.
        It can classify whether 70×70 overlapping patches are real or fake.
        Such a patch-level discriminator architecture has fewer parameters
        than a full-image discriminator and can work on arbitrarily-sized images
        in a fully convolutional fashion.

        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
        with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)

        [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
        It encourages greater color diversity but has no effect on spatial statistics.

    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)

    if netD == 'basic':  # default PatchGAN classifier
        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, no_antialias=no_antialias)
    elif netD == 'n_layers':  # more options
        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, no_antialias=no_antialias)
    elif netD == 'pixel':  # classify if each pixel is real or fake
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
    elif 'stylegan2' in netD:
        net = StyleGAN2Discriminator(input_nc, ndf, n_layers_D, no_antialias=no_antialias, opt=opt)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
    return init_net(net, init_type, init_gain, gpu_ids,
                    initialize_weights=('stylegan2' not in netD))

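# A minimal sketch of pairing the factories above (hypothetical values, not part
# of the original file):
#
#     netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks', norm='instance')
#     netD = define_D(input_nc=3, ndf=64, netD='basic', n_layers_D=3, norm='instance')
#
# Both return the network after init_net() has moved it to the first GPU in
# gpu_ids (if any) and applied init_weights(), except for the stylegan2 variants,
# which keep their own built-in initialization.
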
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
    """Define different GAN objectives.

    The GANLoss class abstracts away the need to create the target label tensor
    that has the same size as the input.
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """ Initialize the GANLoss class.

        Parameters:
            gan_mode (str)            -- the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
            target_real_label (float) -- label for a real image
            target_fake_label (float) -- label of a fake image

        Note: Do not use sigmoid as the last layer of Discriminator.
        LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
        """
        super(GANLoss, self).__init__()
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()
        elif gan_mode == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode in ['wgangp', 'nonsaturating']:
            self.loss = None
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)

    def get_target_tensor(self, prediction, target_is_real):
        """Create label tensors with the same size as the input.

        Parameters:
            prediction (tensor)   -- typically the prediction from a discriminator
            target_is_real (bool) -- if the ground truth label is for real images or fake images

        Returns:
            A label tensor filled with ground truth label, and with the size of the input
        """

        if target_is_real:
            target_tensor = self.real_label
        else:
            target_tensor = self.fake_label
        return target_tensor.expand_as(prediction)

    def __call__(self, prediction, target_is_real):
        """Calculate loss given Discriminator's output and ground truth labels.

        Parameters:
            prediction (tensor)   -- typically the prediction output from a discriminator
            target_is_real (bool) -- if the ground truth label is for real images or fake images

        Returns:
            the calculated loss.
        """
        bs = prediction.size(0)
        if self.gan_mode in ['lsgan', 'vanilla']:
            target_tensor = self.get_target_tensor(prediction, target_is_real)
            loss = self.loss(prediction, target_tensor)
        elif self.gan_mode == 'wgangp':
            if target_is_real:
                loss = -prediction.mean()
            else:
                loss = prediction.mean()
        elif self.gan_mode == 'nonsaturating':
            if target_is_real:
                loss = F.softplus(-prediction).view(bs, -1).mean(dim=1)
            else:
                loss = F.softplus(prediction).view(bs, -1).mean(dim=1)
        return loss

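# Illustrative usage (a sketch, not part of the original file; variable names
# are hypothetical):
#
#     criterion = GANLoss('lsgan')
#     pred_fake = netD(fake_B)                   # raw, un-sigmoided scores
#     loss_D_fake = criterion(pred_fake, False)  # MSE against an all-zeros target
#     loss_D_real = criterion(netD(real_B), True)
#
# get_target_tensor broadcasts the scalar label buffer with expand_as, so the
# same criterion works for any PatchGAN output shape.
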
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
    """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028

    Arguments:
        netD (network)          -- discriminator network
        real_data (tensor)      -- real images
        fake_data (tensor)      -- generated images from the generator
        device (str)            -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        type (str)              -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)        -- the constant used in the formula (||gradient||_2 - constant)^2
        lambda_gp (float)       -- weight for this loss

    Returns the gradient penalty loss
    """
    if lambda_gp > 0.0:
        if type == 'real':  # either use real images, fake images, or a linear interpolation of the two.
            interpolatesv = real_data
        elif type == 'fake':
            interpolatesv = fake_data
        elif type == 'mixed':
            alpha = torch.rand(real_data.shape[0], 1, device=device)
            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
        else:
            raise NotImplementedError('{} not implemented'.format(type))
        interpolatesv.requires_grad_(True)
        disc_interpolates = netD(interpolatesv)
        gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
                                        grad_outputs=torch.ones(disc_interpolates.size()).to(device),
                                        create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp  # added eps
        return gradient_penalty, gradients
    else:
        return 0.0, None

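# Illustrative call site for WGAN-GP (a sketch; variable names are hypothetical):
#
#     gp, _ = cal_gradient_penalty(netD, real_B, fake_B.detach(), device,
#                                  type='mixed', constant=1.0, lambda_gp=10.0)
#     loss_D = criterion(netD(fake_B.detach()), False) + criterion(netD(real_B), True) + gp
#
# With type='mixed', each sample is evaluated at a random point on the segment
# between its real and fake image, as in the WGAN-GP paper.
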
class Normalize(nn.Module):

    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
        out = x.div(norm + 1e-7)
        return out

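# Note (an observation, not original code): Normalize computes
# out = x / (||x||_p + 1e-7) along dim 1; with the default power=2 this is the
# L2 normalization applied to sampled patch features below.
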
class PoolingF(nn.Module):
    def __init__(self):
        super(PoolingF, self).__init__()
        model = [nn.AdaptiveMaxPool2d(1)]
        self.model = nn.Sequential(*model)
        self.l2norm = Normalize(2)

    def forward(self, x):
        return self.l2norm(self.model(x))


class ReshapeF(nn.Module):
    def __init__(self):
        super(ReshapeF, self).__init__()
        model = [nn.AdaptiveAvgPool2d(4)]
        self.model = nn.Sequential(*model)
        self.l2norm = Normalize(2)

    def forward(self, x):
        x = self.model(x)
        x_reshape = x.permute(0, 2, 3, 1).flatten(0, 2)
        return self.l2norm(x_reshape)

class StridedConvF(nn.Module):
    def __init__(self, init_type='normal', init_gain=0.02, gpu_ids=[]):
        super().__init__()
        # self.conv1 = nn.Conv2d(256, 128, 3, stride=2)
        # self.conv2 = nn.Conv2d(128, 64, 3, stride=1)
        self.l2_norm = Normalize(2)
        self.mlps = {}
        self.moving_averages = {}
        self.init_type = init_type
        self.init_gain = init_gain
        self.gpu_ids = gpu_ids

    def create_mlp(self, x):
        C, H = x.shape[1], x.shape[2]
        n_down = int(np.rint(np.log2(H / 32)))
        mlp = []
        for i in range(n_down):
            mlp.append(nn.Conv2d(C, max(C // 2, 64), 3, stride=2))
            mlp.append(nn.ReLU())
            C = max(C // 2, 64)
        mlp.append(nn.Conv2d(C, 64, 3))
        mlp = nn.Sequential(*mlp)
        init_net(mlp, self.init_type, self.init_gain, self.gpu_ids)
        return mlp

    def update_moving_average(self, key, x):
        if key not in self.moving_averages:
            self.moving_averages[key] = x.detach()

        self.moving_averages[key] = self.moving_averages[key] * 0.999 + x.detach() * 0.001

    def forward(self, x, use_instance_norm=False):
        C, H = x.shape[1], x.shape[2]
        key = '%d_%d' % (C, H)
        if key not in self.mlps:
            self.mlps[key] = self.create_mlp(x)
            self.add_module("child_%s" % key, self.mlps[key])
        mlp = self.mlps[key]
        x = mlp(x)
        self.update_moving_average(key, x)
        x = x - self.moving_averages[key]
        if use_instance_norm:
            x = F.instance_norm(x)
        return self.l2_norm(x)

class PatchSampleF(nn.Module):
    def __init__(self, use_mlp=False, init_type='normal', init_gain=0.02, nc=256, gpu_ids=[]):
        # potential issues: currently, we use the same patch_ids for multiple images in the batch
        super(PatchSampleF, self).__init__()
        self.l2norm = Normalize(2)
        self.use_mlp = use_mlp
        self.nc = nc  # hard-coded
        self.mlp_init = False
        self.init_type = init_type
        self.init_gain = init_gain
        self.gpu_ids = gpu_ids

    def create_mlp(self, feats):
        for mlp_id, feat in enumerate(feats):
            input_nc = feat.shape[1]
            mlp = nn.Sequential(*[nn.Linear(input_nc, self.nc), nn.ReLU(), nn.Linear(self.nc, self.nc)])
            if len(self.gpu_ids) > 0:
                mlp.cuda()
            setattr(self, 'mlp_%d' % mlp_id, mlp)
        init_net(self, self.init_type, self.init_gain, self.gpu_ids)
        self.mlp_init = True

    def forward(self, feats, num_patches=64, patch_ids=None):
        return_ids = []
        return_feats = []
        if self.use_mlp and not self.mlp_init:
            self.create_mlp(feats)
        for feat_id, feat in enumerate(feats):
            B, H, W = feat.shape[0], feat.shape[2], feat.shape[3]
            feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2)
            if num_patches > 0:
                if patch_ids is not None:
                    patch_id = patch_ids[feat_id]
                else:
                    # torch.randperm produces cudaErrorIllegalAddress for newer versions of PyTorch. https://github.com/taesungp/contrastive-unpaired-translation/issues/83
                    # patch_id = torch.randperm(feat_reshape.shape[1], device=feats[0].device)
                    patch_id = np.random.permutation(feat_reshape.shape[1])
                    patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))]  # .to(patch_ids.device)
                patch_id = torch.tensor(patch_id, dtype=torch.long, device=feat.device)
                x_sample = feat_reshape[:, patch_id, :].flatten(0, 1)  # reshape(-1, x.shape[1])
            else:
                x_sample = feat_reshape
                patch_id = []
            if self.use_mlp:
                mlp = getattr(self, 'mlp_%d' % feat_id)
                x_sample = mlp(x_sample)
            return_ids.append(patch_id)
            x_sample = self.l2norm(x_sample)

            if num_patches == 0:
                x_sample = x_sample.permute(0, 2, 1).reshape([B, x_sample.shape[-1], H, W])
            return_feats.append(x_sample)
        return return_feats, return_ids

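# Shape walk-through (illustrative, not in the original): for one feature map of
# shape (B, C, H, W), feat_reshape is (B, H*W, C). Sampling num_patches=64 spatial
# indices and flattening gives x_sample of shape (B*64, C); the two-layer MLP then
# projects it to (B*64, nc) before the final L2 normalization.
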
class G_Resnet(nn.Module):
    def __init__(self, input_nc, output_nc, nz, num_downs, n_res, ngf=64,
                 norm=None, nl_layer=None):
        super(G_Resnet, self).__init__()
        n_downsample = num_downs
        pad_type = 'reflect'
        self.enc_content = ContentEncoder(n_downsample, n_res, input_nc, ngf, norm, nl_layer, pad_type=pad_type)
        if nz == 0:
            self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)
        else:
            self.dec = Decoder_all(n_downsample, n_res, self.enc_content.output_dim, output_nc, norm=norm, activ=nl_layer, pad_type=pad_type, nz=nz)

    def decode(self, content, style=None):
        return self.dec(content, style)

    def forward(self, image, style=None, nce_layers=[], encode_only=False):
        content, feats = self.enc_content(image, nce_layers=nce_layers, encode_only=encode_only)
        if encode_only:
            return feats
        else:
            images_recon = self.decode(content, style)
            if len(nce_layers) > 0:
                return images_recon, feats
            else:
                return images_recon

##################################################################################
# Encoder and Decoders
##################################################################################

class E_adaIN(nn.Module):
    def __init__(self, input_nc, output_nc=1, nef=64, n_layers=4,
                 norm=None, nl_layer=None, vae=False):
        # style encoder
        super(E_adaIN, self).__init__()
        self.enc_style = StyleEncoder(n_layers, input_nc, nef, output_nc, norm='none', activ='relu', vae=vae)

    def forward(self, image):
        style = self.enc_style(image)
        return style


class StyleEncoder(nn.Module):
    def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, vae=False):
        super(StyleEncoder, self).__init__()
        self.vae = vae
        self.model = []
        self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
        for i in range(2):
            self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
            dim *= 2
        for i in range(n_downsample - 2):
            self.model += [Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
        self.model += [nn.AdaptiveAvgPool2d(1)]  # global average pooling
        if self.vae:
            self.fc_mean = nn.Linear(dim, style_dim)  # , 1, 1, 0)
            self.fc_var = nn.Linear(dim, style_dim)  # , 1, 1, 0)
        else:
            self.model += [nn.Conv2d(dim, style_dim, 1, 1, 0)]

        self.model = nn.Sequential(*self.model)
        self.output_dim = dim

    def forward(self, x):
        if self.vae:
            output = self.model(x)
            output = output.view(x.size(0), -1)
            output_mean = self.fc_mean(output)
            output_var = self.fc_var(output)
            return output_mean, output_var
        else:
            return self.model(x).view(x.size(0), -1)

class ContentEncoder(nn.Module):
    def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type='zero'):
        super(ContentEncoder, self).__init__()
        self.model = []
        self.model += [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type='reflect')]
        # downsampling blocks
        for i in range(n_downsample):
            self.model += [Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type='reflect')]
            dim *= 2
        # residual blocks
        self.model += [ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)]
        self.model = nn.Sequential(*self.model)
        self.output_dim = dim

    def forward(self, x, nce_layers=[], encode_only=False):
        if len(nce_layers) > 0:
            feat = x
            feats = []
            for layer_id, layer in enumerate(self.model):
                feat = layer(feat)
                if layer_id in nce_layers:
                    feats.append(feat)
                if layer_id == nce_layers[-1] and encode_only:
                    return None, feats
            return feat, feats
        else:
            return self.model(x), None

class Decoder_all(nn.Module):
    def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
        super(Decoder_all, self).__init__()
        # AdaIN residual blocks
        self.resnet_block = ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)
        self.n_blocks = 0
        # upsampling blocks
        for i in range(n_upsample):
            block = [Upsample2(scale_factor=2), Conv2dBlock(dim + nz, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
            setattr(self, 'block_{:d}'.format(self.n_blocks), nn.Sequential(*block))
            self.n_blocks += 1
            dim //= 2
        # use reflection padding in the last conv layer
        setattr(self, 'block_{:d}'.format(self.n_blocks), Conv2dBlock(dim + nz, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect'))
        self.n_blocks += 1

    def forward(self, x, y=None):
        if y is not None:
            output = self.resnet_block(cat_feature(x, y))
            for n in range(self.n_blocks):
                block = getattr(self, 'block_{:d}'.format(n))
                if n > 0:
                    output = block(cat_feature(output, y))
                else:
                    output = block(output)
            return output

class Decoder(nn.Module):
    def __init__(self, n_upsample, n_res, dim, output_dim, norm='batch', activ='relu', pad_type='zero', nz=0):
        super(Decoder, self).__init__()

        self.model = []
        # AdaIN residual blocks
        self.model += [ResBlocks(n_res, dim, norm, activ, pad_type=pad_type, nz=nz)]
        # upsampling blocks
        for i in range(n_upsample):
            if i == 0:
                input_dim = dim + nz
            else:
                input_dim = dim
            self.model += [Upsample2(scale_factor=2), Conv2dBlock(input_dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type='reflect')]
            dim //= 2
        # use reflection padding in the last conv layer
        self.model += [Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type='reflect')]
        self.model = nn.Sequential(*self.model)

    def forward(self, x, y=None):
        if y is not None:
            return self.model(cat_feature(x, y))
        else:
            return self.model(x)

##################################################################################
# Sequential Models
##################################################################################

class ResBlocks(nn.Module):
    def __init__(self, num_blocks, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
        super(ResBlocks, self).__init__()
        self.model = []
        for i in range(num_blocks):
            self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type, nz=nz)]
        self.model = nn.Sequential(*self.model)

    def forward(self, x):
        return self.model(x)


##################################################################################
# Basic Blocks
##################################################################################
def cat_feature(x, y):
    y_expand = y.view(y.size(0), y.size(1), 1, 1).expand(
        y.size(0), y.size(1), x.size(2), x.size(3))
    x_cat = torch.cat([x, y_expand], 1)
    return x_cat


class ResBlock(nn.Module):
    def __init__(self, dim, norm='inst', activation='relu', pad_type='zero', nz=0):
        super(ResBlock, self).__init__()

        model = []
        model += [Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type)]
        model += [Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type)]
        self.model = nn.Sequential(*model)

    def forward(self, x):
        residual = x
        out = self.model(x)
        out += residual
        return out

class Conv2dBlock(nn.Module):
    def __init__(self, input_dim, output_dim, kernel_size, stride,
                 padding=0, norm='none', activation='relu', pad_type='zero'):
        super(Conv2dBlock, self).__init__()
        self.use_bias = True
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # initialize normalization
        norm_dim = output_dim
        if norm == 'batch':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'inst':
            self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=False)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        # initialize convolution
        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)

    def forward(self, x):
        x = self.conv(self.pad(x))
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x

class LinearBlock(nn.Module):
    def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
        super(LinearBlock, self).__init__()
        use_bias = True
        # initialize fully connected layer
        self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)

        # initialize normalization
        norm_dim = output_dim
        if norm == 'batch':
            self.norm = nn.BatchNorm1d(norm_dim)
        elif norm == 'inst':
            self.norm = nn.InstanceNorm1d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU()
        elif activation == 'selu':
            self.activation = nn.SELU(inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

    def forward(self, x):
        out = self.fc(x)
        if self.norm:
            out = self.norm(out)
        if self.activation:
            out = self.activation(out)
        return out

##################################################################################
# Normalization layers
##################################################################################

class LayerNorm(nn.Module):
    def __init__(self, num_features, eps=1e-5, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps

        if self.affine:
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        shape = [-1] + [1] * (x.dim() - 1)
        mean = x.view(x.size(0), -1).mean(1).view(*shape)
        std = x.view(x.size(0), -1).std(1).view(*shape)
        x = (x - mean) / (std + self.eps)

        if self.affine:
            shape = [1, -1] + [1] * (x.dim() - 2)
            x = x * self.gamma.view(*shape) + self.beta.view(*shape)
        return x

class ResnetGenerator(nn.Module):
    """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.

    We adapt Torch code and idea from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style)
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):
        """Construct a Resnet-based generator

        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.opt = opt
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            if(no_antialias):
                model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                          norm_layer(ngf * mult * 2),
                          nn.ReLU(True)]
            else:
                model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
                          norm_layer(ngf * mult * 2),
                          nn.ReLU(True),
                          Downsample(ngf * mult * 2)]

        mult = 2 ** n_downsampling
        for i in range(n_blocks):  # add ResNet blocks
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            if no_antialias_up:
                model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                             kernel_size=3, stride=2,
                                             padding=1, output_padding=1,
                                             bias=use_bias),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]
            else:
                model += [Upsample(ngf * mult),
                          nn.Conv2d(ngf * mult, int(ngf * mult / 2),
                                    kernel_size=3, stride=1,
                                    padding=1,  # output_padding=1,
                                    bias=use_bias),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)

    def forward(self, input, layers=[], encode_only=False):
        if -1 in layers:
            layers.append(len(self.model))
        if len(layers) > 0:
            feat = input
            feats = []
            for layer_id, layer in enumerate(self.model):
                # print(layer_id, layer)
                feat = layer(feat)
                if layer_id in layers:
                    # print("%d: adding the output of %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
                    feats.append(feat)
                else:
                    # print("%d: skipping %s %d" % (layer_id, layer.__class__.__name__, feat.size(1)))
                    pass
                if layer_id == layers[-1] and encode_only:
                    # print('encoder only return features')
                    return feats  # return intermediate features alone; stop in the last layers

            return feat, feats  # return both output and intermediate features
        else:
            """Standard forward"""
            fake = self.model(input)
            return fake

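# Note on the antialiased path (an observation, not original code): unless
# no_antialias is set, each stride-2 conv is replaced by a stride-1 conv followed
# by a blur-then-subsample Downsample module (and Upsample mirrors this on the
# way up), in the spirit of the "Making Convolutional Networks Shift-Invariant
# Again" recipe used by CUT.
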
class ResnetDecoder(nn.Module):
    """Resnet-based decoder that consists of a few Resnet blocks + a few upsampling operations.
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
        """Construct a Resnet-based decoder

        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert(n_blocks >= 0)
        super(ResnetDecoder, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        model = []
        n_downsampling = 2
        mult = 2 ** n_downsampling
        for i in range(n_blocks):  # add ResNet blocks
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            if(no_antialias):
                model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                             kernel_size=3, stride=2,
                                             padding=1, output_padding=1,
                                             bias=use_bias),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]
            else:
                model += [Upsample(ngf * mult),
                          nn.Conv2d(ngf * mult, int(ngf * mult / 2),
                                    kernel_size=3, stride=1,
                                    padding=1,
                                    bias=use_bias),
                          norm_layer(int(ngf * mult / 2)),
                          nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)

    def forward(self, input):
        """Standard forward"""
        return self.model(input)

class ResnetEncoder(nn.Module):
    """Resnet-based encoder that consists of a few downsampling + several Resnet blocks
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False):
        """Construct a Resnet-based encoder

        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert(n_blocks >= 0)
        super(ResnetEncoder, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            if(no_antialias):
                model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                          norm_layer(ngf * mult * 2),
                          nn.ReLU(True)]
            else:
                model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),
                          norm_layer(ngf * mult * 2),
                          nn.ReLU(True),
                          Downsample(ngf * mult * 2)]

        mult = 2 ** n_downsampling
        for i in range(n_blocks):  # add ResNet blocks
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]

        self.model = nn.Sequential(*model)

    def forward(self, input):
        """Standard forward"""
        return self.model(input)

class ResnetBlock(nn.Module):
    """Define a Resnet block"""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Initialize the Resnet block

        A resnet block is a conv block with skip connections
        We construct a conv block with build_conv_block function,
        and implement skip connections in <forward> function.
        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
        """
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Construct a convolutional block.

        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers.
            use_bias (bool)     -- if the conv layer uses bias or not

        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]

        return nn.Sequential(*conv_block)

    def forward(self, x):
        """Forward function (with skip connections)"""
        out = x + self.conv_block(x)  # add skip connections
        return out

class UnetGenerator(nn.Module):
    """Create a Unet-based generator"""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet generator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                               an image of size 128x128 will become of size 1x1 at the bottleneck
            ngf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer

        We construct the U-Net from the innermost layer to the outermost layer.
        It is a recursive process.
        """
        super(UnetGenerator, self).__init__()
        # construct unet structure
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)  # add the innermost layer
        for i in range(num_downs - 5):  # add intermediate layers with ngf * 8 filters
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
        # gradually reduce the number of filters from ngf * 8 to ngf
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)  # add the outermost layer

    def forward(self, input):
        """Standard forward"""
        return self.model(input)

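# Size arithmetic (illustrative, not in the original): each UnetSkipConnectionBlock
# halves the spatial resolution, so unet_256 (num_downs=8) maps 256x256 -> 1x1 at
# the bottleneck, and unet_128 (num_downs=7) maps 128x128 -> 1x1. Inputs should be
# divisible by 2**num_downs for the skip concatenations to line up.
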
class UnetSkipConnectionBlock(nn.Module):
    """Defines the Unet submodule with skip connection.
        X -------------------identity----------------------
        |-- downsampling -- |submodule| -- upsampling --|
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool)    -- if this module is the outermost module
            innermost (bool)    -- if this module is the innermost module
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers.
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:  # add skip connections
            return torch.cat([x, self.model(x)], 1)

class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator"""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
        """Construct a PatchGAN discriminator

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_layer      -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        kw = 4
        padw = 1
        if(no_antialias):
            sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
        else:
            sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), nn.LeakyReLU(0.2, True), Downsample(ndf)]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            if(no_antialias):
                sequence += [
                    nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                    norm_layer(ndf * nf_mult),
                    nn.LeakyReLU(0.2, True)
                ]
            else:
                sequence += [
                    nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
                    norm_layer(ndf * nf_mult),
                    nn.LeakyReLU(0.2, True),
                    Downsample(ndf * nf_mult)]

        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]

        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output 1 channel prediction map
        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)

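# Receptive-field check for the default 'basic' discriminator (illustrative; uses
# the stride-2, no_antialias layout): working backwards through kernel-4 convs
# with strides (2, 2, 2, 1, 1) via rf = rf * stride + (kernel - stride),
#     rf = 1 -> 4 -> 7 -> 16 -> 34 -> 70
# which is where the 70×70 PatchGAN figure in define_D's docstring comes from.
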
class PixelDiscriminator(nn.Module):
    """Defines a 1x1 PatchGAN discriminator (pixelGAN)"""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        """Construct a 1x1 PatchGAN discriminator

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer
        """
        super(PixelDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        self.net = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]

        self.net = nn.Sequential(*self.net)

    def forward(self, input):
        """Standard forward."""
        return self.net(input)

class PatchDiscriminator(NLayerDiscriminator):
    """Defines a PatchGAN discriminator"""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):
        super().__init__(input_nc, ndf, 2, norm_layer, no_antialias)

    def forward(self, input):
        B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
        size = 16
        Y = H // size
        X = W // size
        input = input.view(B, C, Y, size, X, size)
        input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
        return super().forward(input)

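# Shape walk-through (illustrative): a 256x256 input is cut into a 16x16 grid of
# 16x16 crops, i.e. (B, C, 256, 256) -> (B*256, C, 16, 16), and every crop is
# then scored independently by the inherited 2-layer NLayerDiscriminator.
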
class GroupedChannelNorm(nn.Module):
    def __init__(self, num_groups):
        super().__init__()
        self.num_groups = num_groups

    def forward(self, x):
        shape = list(x.shape)
        new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups] + shape[2:]
        x = x.view(*new_shape)
        mean = x.mean(dim=2, keepdim=True)
        std = x.std(dim=2, keepdim=True)
        x_norm = (x - mean) / (std + 1e-7)
        return x_norm.view(*shape)
models/patchnce.py
ADDED
@@ -0,0 +1,55 @@
from packaging import version
import torch
from torch import nn


class PatchNCELoss(nn.Module):
    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
        self.mask_dtype = torch.uint8 if version.parse(torch.__version__) < version.parse('1.2.0') else torch.bool

    def forward(self, feat_q, feat_k):
        num_patches = feat_q.shape[0]
        dim = feat_q.shape[1]
        feat_k = feat_k.detach()

        # pos logit
        l_pos = torch.bmm(
            feat_q.view(num_patches, 1, -1), feat_k.view(num_patches, -1, 1))
        l_pos = l_pos.view(num_patches, 1)

        # neg logit

        # Should the negatives from the other samples of a minibatch be utilized?
        # In CUT and FastCUT, we found that it's best to only include negatives
        # from the same image. Therefore, we set
        # --nce_includes_all_negatives_from_minibatch as False
        # However, for single-image translation, the minibatch consists of
        # crops from the "same" high-resolution image.
        # Therefore, we will include the negatives from the entire minibatch.
        if self.opt.nce_includes_all_negatives_from_minibatch:
            # reshape features as if they are all negatives of minibatch of size 1.
            batch_dim_for_bmm = 1
        else:
            batch_dim_for_bmm = self.opt.batch_size

        # reshape features to batch size
        feat_q = feat_q.view(batch_dim_for_bmm, -1, dim)
        feat_k = feat_k.view(batch_dim_for_bmm, -1, dim)
        npatches = feat_q.size(1)
        l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1))

        # diagonal entries are similarity between same features, and hence meaningless.
        # just fill the diagonal with very small number, which is exp(-10) and almost zero
        diagonal = torch.eye(npatches, device=feat_q.device, dtype=self.mask_dtype)[None, :, :]
        l_neg_curbatch.masked_fill_(diagonal, -10.0)
        l_neg = l_neg_curbatch.view(-1, npatches)

        out = torch.cat((l_pos, l_neg), dim=1) / self.opt.nce_T

        loss = self.cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long,
                                                        device=feat_q.device))

        return loss
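The comments above explain when negatives are drawn from the whole minibatch versus a single image; here is a minimal sketch of calling the loss on dummy features (the opt namespace is a hypothetical stand-in for the repo's real option parser):

import torch
from argparse import Namespace
from models.patchnce import PatchNCELoss

# Hypothetical options; in the repo these come from the CUT option parser.
opt = Namespace(nce_includes_all_negatives_from_minibatch=False,
                batch_size=2, nce_T=0.07)
criterion = PatchNCELoss(opt)

num_patches, dim = 2 * 256, 64          # 256 patches per image, batch of 2
feat_q = torch.randn(num_patches, dim)  # features of the translated image
feat_k = torch.randn(num_patches, dim)  # features of the source image
loss = criterion(feat_q, feat_k)        # one loss value per patch
print(loss.shape)                       # torch.Size([512])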
models/sincut_model.py
ADDED
@@ -0,0 +1,79 @@
import torch
from .cut_model import CUTModel


class SinCUTModel(CUTModel):
    """ This class implements the single image translation model (Fig 9) of
    Contrastive Learning for Unpaired Image-to-Image Translation
    Taesung Park, Alexei A. Efros, Richard Zhang, Jun-Yan Zhu
    ECCV, 2020
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        parser = CUTModel.modify_commandline_options(parser, is_train)
        parser.add_argument('--lambda_R1', type=float, default=1.0,
                            help='weight for the R1 gradient penalty')
        parser.add_argument('--lambda_identity', type=float, default=1.0,
                            help='weight for the identity preservation loss')

        parser.set_defaults(nce_includes_all_negatives_from_minibatch=True,
                            dataset_mode="singleimage",
                            netG="stylegan2",
                            stylegan2_G_num_downsampling=1,
                            netD="stylegan2",
                            gan_mode="nonsaturating",
                            num_patches=1,
                            nce_layers="0,2,4",
                            lambda_NCE=4.0,
                            ngf=10,
                            ndf=8,
                            lr=0.002,
                            beta1=0.0,
                            beta2=0.99,
                            load_size=1024,
                            crop_size=64,
                            preprocess="zoom_and_patch",
                            )

        if is_train:
            parser.set_defaults(preprocess="zoom_and_patch",
                                batch_size=16,
                                save_epoch_freq=1,
                                save_latest_freq=20000,
                                n_epochs=8,
                                n_epochs_decay=8,
                                )
        else:
            parser.set_defaults(preprocess="none",  # load the whole image as it is
                                batch_size=1,
                                num_test=1,
                                )

        return parser

    def __init__(self, opt):
        super().__init__(opt)
        if self.isTrain:
            if opt.lambda_R1 > 0.0:
                self.loss_names += ['D_R1']
            if opt.lambda_identity > 0.0:
                self.loss_names += ['idt']

    def compute_D_loss(self):
        self.real_B.requires_grad_()
        GAN_loss_D = super().compute_D_loss()
        self.loss_D_R1 = self.R1_loss(self.pred_real, self.real_B)
        self.loss_D = GAN_loss_D + self.loss_D_R1
        return self.loss_D

    def compute_G_loss(self):
        CUT_loss_G = super().compute_G_loss()
        self.loss_idt = torch.nn.functional.l1_loss(self.idt_B, self.real_B) * self.opt.lambda_identity
        return CUT_loss_G + self.loss_idt

    def R1_loss(self, real_pred, real_img):
        grad_real, = torch.autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True, retain_graph=True)
        grad_penalty = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1).mean()
        return grad_penalty * (self.opt.lambda_R1 * 0.5)
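R1_loss above is the standard R1 gradient penalty on real samples; as a sanity check, the same computation can be written outside the model class (a self-contained sketch with a toy discriminator standing in for the real one):

import torch

def r1_penalty(discriminator, real, gamma=1.0):
    # Penalize the squared gradient norm of D's output w.r.t. the real input,
    # mirroring what compute_D_loss adds on top of the GAN loss.
    real = real.detach().requires_grad_(True)
    pred = discriminator(real)
    grad, = torch.autograd.grad(outputs=pred.sum(), inputs=real, create_graph=True)
    return grad.pow(2).view(grad.shape[0], -1).sum(1).mean() * (gamma * 0.5)

netD = torch.nn.Conv2d(3, 1, 3, padding=1)  # toy discriminator, just for the call pattern
penalty = r1_penalty(netD, torch.randn(4, 3, 32, 32))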
models/stylegan_networks.py
ADDED
@@ -0,0 +1,914 @@
"""
The network architectures are based on a PyTorch implementation of StyleGAN2.
Original PyTorch repo: https://github.com/rosinality/style-based-gan-pytorch
Original StyleGAN2 repo: https://github.com/NVlabs/stylegan2
We use this network architecture for our single-image training setting.
"""

import math
import numpy as np
import random

import torch
from torch import nn
from torch.nn import functional as F


def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    return F.leaky_relu(input + bias, negative_slope) * scale


class FusedLeakyReLU(nn.Module):
    def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(1, channel, 1, 1))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        # print("FusedLeakyReLU: ", input.abs().mean())
        out = fused_leaky_relu(input, self.bias,
                               self.negative_slope,
                               self.scale)
        # print("FusedLeakyReLU: ", out.abs().mean())
        return out


def upfirdn2d_native(
        input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
    _, minor, in_h, in_w = input.shape
    kernel_h, kernel_w = kernel.shape

    out = input.view(-1, minor, in_h, 1, in_w, 1)
    out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0])
    out = out.view(-1, minor, in_h * up_y, in_w * up_x)

    out = F.pad(
        out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
    )
    out = out[
        :,
        :,
        max(-pad_y0, 0): out.shape[2] - max(-pad_y1, 0),
        max(-pad_x0, 0): out.shape[3] - max(-pad_x1, 0),
    ]

    # out = out.permute(0, 3, 1, 2)
    out = out.reshape(
        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
    )
    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    out = F.conv2d(out, w)
    out = out.reshape(
        -1,
        minor,
        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
    )
    # out = out.permute(0, 2, 3, 1)

    return out[:, :, ::down_y, ::down_x]


def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])


class PixelNorm(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)


def make_kernel(k):
    k = torch.tensor(k, dtype=torch.float32)

    if len(k.shape) == 1:
        k = k[None, :] * k[:, None]

    k /= k.sum()

    return k


class Upsample(nn.Module):
    def __init__(self, kernel, factor=2):
        super().__init__()

        self.factor = factor
        kernel = make_kernel(kernel) * (factor ** 2)
        self.register_buffer('kernel', kernel)

        p = kernel.shape[0] - factor

        pad0 = (p + 1) // 2 + factor - 1
        pad1 = p // 2

        self.pad = (pad0, pad1)

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)

        return out


class Downsample(nn.Module):
    def __init__(self, kernel, factor=2):
        super().__init__()

        self.factor = factor
        kernel = make_kernel(kernel)
        self.register_buffer('kernel', kernel)

        p = kernel.shape[0] - factor

        pad0 = (p + 1) // 2
        pad1 = p // 2

        self.pad = (pad0, pad1)

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)

        return out


class Blur(nn.Module):
    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()

        kernel = make_kernel(kernel)

        if upsample_factor > 1:
            kernel = kernel * (upsample_factor ** 2)

        self.register_buffer('kernel', kernel)

        self.pad = pad

    def forward(self, input):
        out = upfirdn2d(input, self.kernel, pad=self.pad)

        return out


class EqualConv2d(nn.Module):
    def __init__(
            self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
    ):
        super().__init__()

        self.weight = nn.Parameter(
            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        )
        self.scale = math.sqrt(1) / math.sqrt(in_channel * (kernel_size ** 2))

        self.stride = stride
        self.padding = padding

        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channel))

        else:
            self.bias = None

    def forward(self, input):
        # print("Before EqualConv2d: ", input.abs().mean())
        out = F.conv2d(
            input,
            self.weight * self.scale,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
        )
        # print("After EqualConv2d: ", out.abs().mean(), (self.weight * self.scale).abs().mean())

        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
            f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
        )


class EqualLinear(nn.Module):
    def __init__(
            self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
    ):
        super().__init__()

        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))

        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))

        else:
            self.bias = None

        self.activation = activation

        self.scale = (math.sqrt(1) / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, self.bias * self.lr_mul)

        else:
            out = F.linear(
                input, self.weight * self.scale, bias=self.bias * self.lr_mul
            )

        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
        )


class ScaledLeakyReLU(nn.Module):
    def __init__(self, negative_slope=0.2):
        super().__init__()

        self.negative_slope = negative_slope

    def forward(self, input):
        out = F.leaky_relu(input, negative_slope=self.negative_slope)

        return out * math.sqrt(2)


class ModulatedConv2d(nn.Module):
    def __init__(
            self,
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            demodulate=True,
            upsample=False,
            downsample=False,
            blur_kernel=[1, 3, 3, 1],
    ):
        super().__init__()

        self.eps = 1e-8
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample

        if upsample:
            factor = 2
            p = (len(blur_kernel) - factor) - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1

            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            self.blur = Blur(blur_kernel, pad=(pad0, pad1))

        fan_in = in_channel * kernel_size ** 2
        self.scale = math.sqrt(1) / math.sqrt(fan_in)
        self.padding = kernel_size // 2

        self.weight = nn.Parameter(
            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
        )

        if style_dim is not None and style_dim > 0:
            self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)

        self.demodulate = demodulate

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
            f'upsample={self.upsample}, downsample={self.downsample})'
        )

    def forward(self, input, style):
        batch, in_channel, height, width = input.shape

        if style is not None:
            style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        else:
            # create on the input's device instead of hard-coding .cuda(), so CPU inference also works
            style = torch.ones(batch, 1, in_channel, 1, 1, device=input.device)
        weight = self.scale * self.weight * style

        if self.demodulate:
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)

        weight = weight.view(
            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
        )

        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            weight = weight.view(
                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
            )
            weight = weight.transpose(1, 2).reshape(
                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
            )
            out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)

        elif self.downsample:
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        else:
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)

        return out


class NoiseInjection(nn.Module):
    def __init__(self):
        super().__init__()

        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, noise=None):
        if noise is None:
            batch, _, height, width = image.shape
            noise = image.new_empty(batch, 1, height, width).normal_()

        return image + self.weight * noise


class ConstantInput(nn.Module):
    def __init__(self, channel, size=4):
        super().__init__()

        self.input = nn.Parameter(torch.randn(1, channel, size, size))

    def forward(self, input):
        batch = input.shape[0]
        out = self.input.repeat(batch, 1, 1, 1)

        return out


class StyledConv(nn.Module):
    def __init__(
            self,
            in_channel,
            out_channel,
            kernel_size,
            style_dim=None,
            upsample=False,
            blur_kernel=[1, 3, 3, 1],
            demodulate=True,
            inject_noise=True,
    ):
        super().__init__()

        self.inject_noise = inject_noise
        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )

        self.noise = NoiseInjection()
        # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
        # self.activate = ScaledLeakyReLU(0.2)
        self.activate = FusedLeakyReLU(out_channel)

    def forward(self, input, style=None, noise=None):
        out = self.conv(input, style)
        if self.inject_noise:
            out = self.noise(out, noise=noise)
        # out = out + self.bias
        out = self.activate(out)

        return out


class ToRGB(nn.Module):
    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        if upsample:
            self.upsample = Upsample(blur_kernel)

        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))

    def forward(self, input, style, skip=None):
        out = self.conv(input, style)
        out = out + self.bias

        if skip is not None:
            skip = self.upsample(skip)

            out = out + skip

        return out


class Generator(nn.Module):
    def __init__(
            self,
            size,
            style_dim,
            n_mlp,
            channel_multiplier=2,
            blur_kernel=[1, 3, 3, 1],
            lr_mlp=0.01,
    ):
        super().__init__()

        self.size = size

        self.style_dim = style_dim

        layers = [PixelNorm()]

        for i in range(n_mlp):
            layers.append(
                EqualLinear(
                    style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
                )
            )

        self.style = nn.Sequential(*layers)

        self.channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        self.input = ConstantInput(self.channels[4])
        self.conv1 = StyledConv(
            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
        )
        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)

        self.log_size = int(math.log(size, 2))
        self.num_layers = (self.log_size - 2) * 2 + 1

        self.convs = nn.ModuleList()
        self.upsamples = nn.ModuleList()
        self.to_rgbs = nn.ModuleList()
        self.noises = nn.Module()

        in_channel = self.channels[4]

        for layer_idx in range(self.num_layers):
            res = (layer_idx + 5) // 2
            shape = [1, 1, 2 ** res, 2 ** res]
            self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))

        for i in range(3, self.log_size + 1):
            out_channel = self.channels[2 ** i]

            self.convs.append(
                StyledConv(
                    in_channel,
                    out_channel,
                    3,
                    style_dim,
                    upsample=True,
                    blur_kernel=blur_kernel,
                )
            )

            self.convs.append(
                StyledConv(
                    out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
                )
            )

            self.to_rgbs.append(ToRGB(out_channel, style_dim))

            in_channel = out_channel

        self.n_latent = self.log_size * 2 - 2

    def make_noise(self):
        device = self.input.input.device

        noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]

        for i in range(3, self.log_size + 1):
            for _ in range(2):
                noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))

        return noises

    def mean_latent(self, n_latent):
        latent_in = torch.randn(
            n_latent, self.style_dim, device=self.input.input.device
        )
        latent = self.style(latent_in).mean(0, keepdim=True)

        return latent

    def get_latent(self, input):
        return self.style(input)

    def forward(
            self,
            styles,
            return_latents=False,
            inject_index=None,
            truncation=1,
            truncation_latent=None,
            input_is_latent=False,
            noise=None,
            randomize_noise=True,
    ):
        if not input_is_latent:
            styles = [self.style(s) for s in styles]

        if noise is None:
            if randomize_noise:
                noise = [None] * self.num_layers
            else:
                noise = [
                    getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
                ]

        if truncation < 1:
            style_t = []

            for style in styles:
                style_t.append(
                    truncation_latent + truncation * (style - truncation_latent)
                )

            styles = style_t

        if len(styles) < 2:
            inject_index = self.n_latent

            if len(styles[0].shape) < 3:
                latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)

            else:
                latent = styles[0]

        else:
            if inject_index is None:
                inject_index = random.randint(1, self.n_latent - 1)

            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)

            latent = torch.cat([latent, latent2], 1)

        out = self.input(latent)
        out = self.conv1(out, latent[:, 0], noise=noise[0])

        skip = self.to_rgb1(out, latent[:, 1])

        i = 1
        for conv1, conv2, noise1, noise2, to_rgb in zip(
                self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
        ):
            out = conv1(out, latent[:, i], noise=noise1)
            out = conv2(out, latent[:, i + 1], noise=noise2)
            skip = to_rgb(out, latent[:, i + 2], skip)

            i += 2

        image = skip

        if return_latents:
            return image, latent

        else:
            return image, None


class ConvLayer(nn.Sequential):
    def __init__(
            self,
            in_channel,
            out_channel,
            kernel_size,
            downsample=False,
            blur_kernel=[1, 3, 3, 1],
            bias=True,
            activate=True,
    ):
        layers = []

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))

            stride = 2
            self.padding = 0

        else:
            stride = 1
            self.padding = kernel_size // 2

        layers.append(
            EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias and not activate,
            )
        )

        if activate:
            if bias:
                layers.append(FusedLeakyReLU(out_channel))

            else:
                layers.append(ScaledLeakyReLU(0.2))

        super().__init__(*layers)


class ResBlock(nn.Module):
    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1], downsample=True, skip_gain=1.0):
        super().__init__()

        self.skip_gain = skip_gain
        self.conv1 = ConvLayer(in_channel, in_channel, 3)
        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=downsample, blur_kernel=blur_kernel)

        if in_channel != out_channel or downsample:
            self.skip = ConvLayer(
                in_channel, out_channel, 1, downsample=downsample, activate=False, bias=False
            )
        else:
            self.skip = nn.Identity()

    def forward(self, input):
        out = self.conv1(input)
        out = self.conv2(out)

        skip = self.skip(input)
        out = (out * self.skip_gain + skip) / math.sqrt(self.skip_gain ** 2 + 1.0)

        return out


class StyleGAN2Discriminator(nn.Module):
    def __init__(self, input_nc, ndf=64, n_layers=3, no_antialias=False, size=None, opt=None):
        super().__init__()
        self.opt = opt
        self.stddev_group = 16
        if size is None:
            size = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size)))))
            if "patch" in self.opt.netD and self.opt.D_patch_size is not None:
                size = 2 ** int(np.log2(self.opt.D_patch_size))

        blur_kernel = [1, 3, 3, 1]
        channel_multiplier = ndf / 64
        channels = {
            4: min(384, int(4096 * channel_multiplier)),
            8: min(384, int(2048 * channel_multiplier)),
            16: min(384, int(1024 * channel_multiplier)),
            32: min(384, int(512 * channel_multiplier)),
            64: int(256 * channel_multiplier),
            128: int(128 * channel_multiplier),
            256: int(64 * channel_multiplier),
            512: int(32 * channel_multiplier),
            1024: int(16 * channel_multiplier),
        }

        convs = [ConvLayer(3, channels[size], 1)]

        log_size = int(math.log(size, 2))

        in_channel = channels[size]

        if "smallpatch" in self.opt.netD:
            final_res_log2 = 4
        elif "patch" in self.opt.netD:
            final_res_log2 = 3
        else:
            final_res_log2 = 2

        for i in range(log_size, final_res_log2, -1):
            out_channel = channels[2 ** (i - 1)]

            convs.append(ResBlock(in_channel, out_channel, blur_kernel))

            in_channel = out_channel

        self.convs = nn.Sequential(*convs)

        if False and "tile" in self.opt.netD:  # disabled: minibatch-stddev channel, kept for reference
            in_channel += 1
        self.final_conv = ConvLayer(in_channel, channels[4], 3)
        if "patch" in self.opt.netD:
            self.final_linear = ConvLayer(channels[4], 1, 3, bias=False, activate=False)
        else:
            self.final_linear = nn.Sequential(
                EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
                EqualLinear(channels[4], 1),
            )

    def forward(self, input, get_minibatch_features=False):
        if "patch" in self.opt.netD and self.opt.D_patch_size is not None:
            h, w = input.size(2), input.size(3)
            y = torch.randint(h - self.opt.D_patch_size, ())
            x = torch.randint(w - self.opt.D_patch_size, ())
            input = input[:, :, y:y + self.opt.D_patch_size, x:x + self.opt.D_patch_size]
        out = input
        for i, conv in enumerate(self.convs):
            out = conv(out)
            # print(i, out.abs().mean())
        # out = self.convs(input)

        batch, channel, height, width = out.shape

        if False and "tile" in self.opt.netD:  # disabled: minibatch standard-deviation feature, kept for reference
            group = min(batch, self.stddev_group)
            stddev = out.view(
                group, -1, 1, channel // 1, height, width
            )
            stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
            stddev = stddev.mean([2, 3, 4], keepdim=True).squeeze(2)
            stddev = stddev.repeat(group, 1, height, width)
            out = torch.cat([out, stddev], 1)

        out = self.final_conv(out)
        # print(out.abs().mean())

        if "patch" not in self.opt.netD:
            out = out.view(batch, -1)
        out = self.final_linear(out)

        return out


class TileStyleGAN2Discriminator(StyleGAN2Discriminator):
    def forward(self, input):
        B, C, H, W = input.size(0), input.size(1), input.size(2), input.size(3)
        size = self.opt.D_patch_size
        Y = H // size
        X = W // size
        input = input.view(B, C, Y, size, X, size)
        input = input.permute(0, 2, 4, 1, 3, 5).contiguous().view(B * Y * X, C, size, size)
        return super().forward(input)


class StyleGAN2Encoder(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
        super().__init__()
        assert opt is not None
        self.opt = opt
        channel_multiplier = ngf / 32
        channels = {
            4: min(512, int(round(4096 * channel_multiplier))),
            8: min(512, int(round(2048 * channel_multiplier))),
            16: min(512, int(round(1024 * channel_multiplier))),
            32: min(512, int(round(512 * channel_multiplier))),
            64: int(round(256 * channel_multiplier)),
            128: int(round(128 * channel_multiplier)),
            256: int(round(64 * channel_multiplier)),
            512: int(round(32 * channel_multiplier)),
            1024: int(round(16 * channel_multiplier)),
        }

        blur_kernel = [1, 3, 3, 1]

        cur_res = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size)))))
        convs = [nn.Identity(),
                 ConvLayer(3, channels[cur_res], 1)]

        num_downsampling = self.opt.stylegan2_G_num_downsampling
        for i in range(num_downsampling):
            in_channel = channels[cur_res]
            out_channel = channels[cur_res // 2]
            convs.append(ResBlock(in_channel, out_channel, blur_kernel, downsample=True))
            cur_res = cur_res // 2

        for i in range(n_blocks // 2):
            n_channel = channels[cur_res]
            convs.append(ResBlock(n_channel, n_channel, downsample=False))

        self.convs = nn.Sequential(*convs)

    def forward(self, input, layers=[], get_features=False):
        feat = input
        feats = []
        if -1 in layers:
            layers.append(len(self.convs) - 1)
        for layer_id, layer in enumerate(self.convs):
            feat = layer(feat)
            # print(layer_id, " features ", feat.abs().mean())
            if layer_id in layers:
                feats.append(feat)

        if get_features:
            return feat, feats
        else:
            return feat


class StyleGAN2Decoder(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
        super().__init__()
        assert opt is not None
        self.opt = opt

        blur_kernel = [1, 3, 3, 1]

        channel_multiplier = ngf / 32
        channels = {
            4: min(512, int(round(4096 * channel_multiplier))),
            8: min(512, int(round(2048 * channel_multiplier))),
            16: min(512, int(round(1024 * channel_multiplier))),
            32: min(512, int(round(512 * channel_multiplier))),
            64: int(round(256 * channel_multiplier)),
            128: int(round(128 * channel_multiplier)),
            256: int(round(64 * channel_multiplier)),
            512: int(round(32 * channel_multiplier)),
            1024: int(round(16 * channel_multiplier)),
        }

        num_downsampling = self.opt.stylegan2_G_num_downsampling
        cur_res = 2 ** int((np.rint(np.log2(min(opt.load_size, opt.crop_size))))) // (2 ** num_downsampling)
        convs = []

        for i in range(n_blocks // 2):
            n_channel = channels[cur_res]
            convs.append(ResBlock(n_channel, n_channel, downsample=False))

        for i in range(num_downsampling):
            in_channel = channels[cur_res]
            out_channel = channels[cur_res * 2]
            inject_noise = "small" not in self.opt.netG
            convs.append(
                StyledConv(in_channel, out_channel, 3, upsample=True, blur_kernel=blur_kernel, inject_noise=inject_noise)
            )
            cur_res = cur_res * 2

        convs.append(ConvLayer(channels[cur_res], 3, 1))

        self.convs = nn.Sequential(*convs)

    def forward(self, input):
        return self.convs(input)


class StyleGAN2Generator(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect', no_antialias=False, opt=None):
        super().__init__()
        self.opt = opt
        self.encoder = StyleGAN2Encoder(input_nc, output_nc, ngf, use_dropout, n_blocks, padding_type, no_antialias, opt)
        self.decoder = StyleGAN2Decoder(input_nc, output_nc, ngf, use_dropout, n_blocks, padding_type, no_antialias, opt)

    def forward(self, input, layers=[], encode_only=False):
        feat, feats = self.encoder(input, layers, True)
        if encode_only:
            return feats
        else:
            fake = self.decoder(feat)

            if len(layers) > 0:
                return fake, feats
            else:
                return fake
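The Upsample, Downsample, and Blur modules above all reduce to upfirdn2d with a normalized (optionally separable) kernel; a small sketch of the antialiased 2x resampling path (shapes are illustrative):

import torch
from models.stylegan_networks import Upsample, Downsample

x = torch.randn(1, 8, 16, 16)
up = Upsample([1, 3, 3, 1])      # 2x upsampling with a [1, 3, 3, 1] blur kernel
down = Downsample([1, 3, 3, 1])  # 2x antialiased downsampling
print(up(x).shape)               # torch.Size([1, 8, 32, 32])
print(down(x).shape)             # torch.Size([1, 8, 8, 8])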
models/template_model.py
ADDED
@@ -0,0 +1,99 @@
"""Model class template

This module provides a template for users to implement custom models.
You can specify '--model template' to use this model.
The class name should be consistent with both the filename and its model option.
The filename should be <model>_model.py, and the class name should be <Model>Model.
It implements a simple image-to-image translation baseline based on regression loss.
Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
    min_<netG> ||netG(data_A) - data_B||_1
You need to implement the following functions:
    <modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
    <__init__>: Initialize this model class.
    <set_input>: Unpack input data and perform data pre-processing.
    <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
    <optimize_parameters>: Update network weights; it will be called in every training iteration.
"""
import torch
from .base_model import BaseModel
from . import networks


class TemplateModel(BaseModel):
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new model-specific options and rewrite default values for existing options.

        Parameters:
            parser -- the option parser
            is_train -- whether it is the training phase or the test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        parser.set_defaults(dataset_mode='aligned')  # You can rewrite default values for this model. For example, this model usually uses the aligned dataset.
        if is_train:
            parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss')  # You can define new arguments for this model.

        return parser

    def __init__(self, opt):
        """Initialize this model class.

        Parameters:
            opt -- training/test options

        A few things can be done here.
        - (required) call the initialization function of BaseModel
        - define loss function, visualization images, model names, and optimizers
        """
        BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
        # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
        # base_model prepends 'loss_' when looking these up, so 'G' refers to self.loss_G defined in backward() below.
        self.loss_names = ['G']
        # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
        self.visual_names = ['data_A', 'data_B', 'output']
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
        # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
        self.model_names = ['G']
        # define networks; you can use opt.isTrain to specify different behaviors for training and test.
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
        if self.isTrain:  # only defined during training time
            # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
            # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
            self.criterionLoss = torch.nn.L1Loss()
            # define and initialize optimizers. You can define one optimizer for each network.
            # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
            self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers = [self.optimizer]

        # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input: a dictionary that contains the data itself and its metadata information.
        """
        AtoB = self.opt.direction == 'AtoB'  # use <direction> to swap data_A and data_B
        self.data_A = input['A' if AtoB else 'B'].to(self.device)  # get image data A
        self.data_B = input['B' if AtoB else 'A'].to(self.device)  # get image data B
        self.image_paths = input['A_paths' if AtoB else 'B_paths']  # get image paths

    def forward(self):
        """Run forward pass. This will be called by both functions <optimize_parameters> and <test>."""
        self.output = self.netG(self.data_A)  # generate output image given the input data_A

    def backward(self):
        """Calculate losses and gradients of network G; called in every training iteration"""
        # calculate intermediate results if necessary; here self.output has been computed during function <forward>
        # calculate loss given the input and intermediate results
        self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
        self.loss_G.backward()  # calculate gradients of network G w.r.t. loss_G

    def optimize_parameters(self):
        """Update network weights; it will be called in every training iteration."""
        self.forward()               # first call forward to calculate intermediate results
        self.optimizer.zero_grad()   # clear network G's existing gradients
        self.backward()              # calculate gradients for network G
        self.optimizer.step()        # update network G's weights
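The docstring's objective min_<netG> ||netG(data_A) - data_B||_1 corresponds to one training iteration like the following (a minimal sketch with a toy convolution standing in for networks.define_G):

import torch

netG = torch.nn.Conv2d(3, 3, 3, padding=1)  # toy stand-in for the real generator
optimizer = torch.optim.Adam(netG.parameters(), lr=2e-4, betas=(0.5, 0.999))
criterion = torch.nn.L1Loss()

data_A = torch.randn(1, 3, 64, 64)  # input image
data_B = torch.randn(1, 3, 64, 64)  # target image

output = netG(data_A)               # forward()
optimizer.zero_grad()
loss_G = criterion(output, data_B)  # backward() computes this loss
loss_G.backward()
optimizer.step()                    # optimize_parameters() applies the update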
requirements.txt
ADDED
@@ -0,0 +1,7 @@
torch>=1.4.0
torchvision>=0.5.0
dominate>=2.4.0
visdom>=0.1.8.8
packaging
GPUtil>=1.4.0
gradio